diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts
index ee532a27910..fd398151a7c 100644
--- a/packages/components/nodes/chains/LLMChain/LLMChain.ts
+++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts
@@ -1,7 +1,7 @@
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { BaseOutputParser } from 'langchain/schema/output_parser'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
@@ -141,7 +141,7 @@ class LLMChain_Chains implements INode {
 
 const runPrediction = async (
     inputVariables: string[],
-    chain: LLMChain,
+    chain: LLMChain<string, BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
     input: string,
     promptValuesRaw: ICommonObject | undefined,
     options: ICommonObject,
@@ -164,7 +164,7 @@ const runPrediction = async (
     if (moderations && moderations.length > 0) {
         try {
             // Use the output of the moderation chain as input for the LLM chain
-            input = await checkInputs(moderations, chain.llm, input)
+            input = await checkInputs(moderations, input)
         } catch (e) {
             await new Promise((resolve) => setTimeout(resolve, 500))
             streamResponse(isStreaming, e.message, socketIO, socketIOClientId)
diff --git a/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts
index ade46ab94d2..956fcdb3389 100644
--- a/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts
+++ b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts
@@ -27,7 +27,7 @@ class AWSChatBedrock_ChatModels implements INode {
     constructor() {
         this.label = 'AWS Bedrock'
         this.name = 'awsChatBedrock'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'AWSChatBedrock'
         this.icon = 'awsBedrock.png'
         this.category = 'Chat Models'
@@ -97,7 +97,8 @@ class AWSChatBedrock_ChatModels implements INode {
                 options: [
                     { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
                     { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
-                    { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
+                    { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
+                    { label: 'meta.llama2-13b-chat-v1', name: 'meta.llama2-13b-chat-v1' }
                 ],
                 default: 'anthropic.claude-v2'
             },
@@ -128,12 +129,14 @@ class AWSChatBedrock_ChatModels implements INode {
         const iTemperature = nodeData.inputs?.temperature as string
         const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
         const cache = nodeData.inputs?.cache as BaseCache
+        const streaming = nodeData.inputs?.streaming as boolean
 
         const obj: BaseBedrockInput & BaseLLMParams = {
             region: iRegion,
             model: iModel,
             maxTokens: parseInt(iMax_tokens_to_sample, 10),
-            temperature: parseFloat(iTemperature)
+            temperature: parseFloat(iTemperature),
+            streaming: streaming ?? true
         }
 
         /**
diff --git a/packages/components/nodes/embeddings/AWSBedrockEmbedding/AWSBedrockEmbedding.ts b/packages/components/nodes/embeddings/AWSBedrockEmbedding/AWSBedrockEmbedding.ts
index ba2aa5e7d35..8249d5121dc 100644
--- a/packages/components/nodes/embeddings/AWSBedrockEmbedding/AWSBedrockEmbedding.ts
+++ b/packages/components/nodes/embeddings/AWSBedrockEmbedding/AWSBedrockEmbedding.ts
@@ -18,7 +18,7 @@ class AWSBedrockEmbedding_Embeddings implements INode {
     constructor() {
         this.label = 'AWS Bedrock Embeddings'
         this.name = 'AWSBedrockEmbeddings'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AWSBedrockEmbeddings'
         this.icon = 'awsBedrock.png'
         this.category = 'Embeddings'
@@ -81,7 +81,9 @@ class AWSBedrockEmbedding_Embeddings implements INode {
                 type: 'options',
                 options: [
                     { label: 'amazon.titan-embed-text-v1', name: 'amazon.titan-embed-text-v1' },
-                    { label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' }
+                    { label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' },
+                    { label: 'cohere.embed-english-v3', name: 'cohere.embed-english-v3' },
+                    { label: 'cohere.embed-multilingual-v3', name: 'cohere.embed-multilingual-v3' }
                 ],
                 default: 'amazon.titan-embed-text-v1'
             }
diff --git a/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts b/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts
index b67219f37fc..177a32ef92c 100644
--- a/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts
+++ b/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts
@@ -27,7 +27,7 @@ class AWSBedrock_LLMs implements INode {
     constructor() {
         this.label = 'AWS Bedrock'
         this.name = 'awsBedrock'
-        this.version = 1.2
+        this.version = 2.0
         this.type = 'AWSBedrock'
         this.icon = 'awsBedrock.png'
         this.category = 'LLMs'
@@ -98,6 +98,7 @@ class AWSBedrock_LLMs implements INode {
                     { label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
                     { label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
                     { label: 'cohere.command-text-v14', name: 'cohere.command-text-v14' },
+                    { label: 'cohere.command-light-text-v14', name: 'cohere.command-light-text-v14' },
                     { label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
                     { label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
                     { label: 'ai21.j2-mid', name: 'ai21.j2-mid' },
diff --git a/packages/components/nodes/moderation/Moderation.ts b/packages/components/nodes/moderation/Moderation.ts
index 9c40f55ab45..9fd2bfde340 100644
--- a/packages/components/nodes/moderation/Moderation.ts
+++ b/packages/components/nodes/moderation/Moderation.ts
@@ -1,13 +1,12 @@
-import { BaseLanguageModel } from 'langchain/base_language'
 import { Server } from 'socket.io'
 
 export abstract class Moderation {
-    abstract checkForViolations(llm: BaseLanguageModel, input: string): Promise<string>
+    abstract checkForViolations(input: string): Promise<string>
 }
 
-export const checkInputs = async (inputModerations: Moderation[], llm: BaseLanguageModel, input: string): Promise<string> => {
+export const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
     for (const moderation of inputModerations) {
-        input = await moderation.checkForViolations(llm, input)
+        input = await moderation.checkForViolations(input)
     }
     return input
 }
diff --git a/packages/components/nodes/moderation/OpenAIModeration/OpenAIModeration.ts b/packages/components/nodes/moderation/OpenAIModeration/OpenAIModeration.ts
index 5233f174f15..51578630c8a 100644
--- a/packages/components/nodes/moderation/OpenAIModeration/OpenAIModeration.ts
+++ b/packages/components/nodes/moderation/OpenAIModeration/OpenAIModeration.ts
@@ -1,5 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src'
 import { Moderation } from '../Moderation'
 import { OpenAIModerationRunner } from './OpenAIModerationRunner'
 
@@ -12,6 +12,7 @@ class OpenAIModeration implements INode {
     icon: string
     category: string
     baseClasses: string[]
+    credential: INodeParams
     inputs: INodeParams[]
 
     constructor() {
@@ -23,6 +24,12 @@
         this.category = 'Moderation'
         this.description = 'Check whether content complies with OpenAI usage policies.'
         this.baseClasses = [this.type, ...getBaseClasses(Moderation)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['openAIApi']
+        }
         this.inputs = [
             {
                 label: 'Error Message',
@@ -35,8 +42,11 @@
         ]
     }
 
-    async init(nodeData: INodeData): Promise<any> {
-        const runner = new OpenAIModerationRunner()
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
+
+        const runner = new OpenAIModerationRunner(openAIApiKey)
         const moderationErrorMessage = nodeData.inputs?.moderationErrorMessage as string
         if (moderationErrorMessage) runner.setErrorMessage(moderationErrorMessage)
         return runner
diff --git a/packages/components/nodes/moderation/OpenAIModeration/OpenAIModerationRunner.ts b/packages/components/nodes/moderation/OpenAIModeration/OpenAIModerationRunner.ts
index c517f419a7f..3a3ec550273 100644
--- a/packages/components/nodes/moderation/OpenAIModeration/OpenAIModerationRunner.ts
+++ b/packages/components/nodes/moderation/OpenAIModeration/OpenAIModerationRunner.ts
@@ -1,18 +1,21 @@
 import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'
 import { OpenAIModerationChain } from 'langchain/chains'
 
 export class OpenAIModerationRunner implements Moderation {
+    private openAIApiKey = ''
     private moderationErrorMessage: string = "Text was found that violates OpenAI's content policy."
 
-    async checkForViolations(llm: BaseLanguageModel, input: string): Promise<string> {
-        const openAIApiKey = (llm as any).openAIApiKey
-        if (!openAIApiKey) {
+    constructor(openAIApiKey: string) {
+        this.openAIApiKey = openAIApiKey
+    }
+
+    async checkForViolations(input: string): Promise<string> {
+        if (!this.openAIApiKey) {
             throw Error('OpenAI API key not found')
         }
         // Create a new instance of the OpenAIModerationChain
         const moderation = new OpenAIModerationChain({
-            openAIApiKey: openAIApiKey,
+            openAIApiKey: this.openAIApiKey,
             throwError: false // If set to true, the call will throw an error when the moderation chain detects violating content. If set to false, violating content will return "Text was found that violates OpenAI's content policy.".
         })
         // Send the user's input to the moderation chain and wait for the result
diff --git a/packages/components/nodes/moderation/SimplePromptModeration/SimplePromptModerationRunner.ts b/packages/components/nodes/moderation/SimplePromptModeration/SimplePromptModerationRunner.ts
index 7fc251ad48a..94967ba2d35 100644
--- a/packages/components/nodes/moderation/SimplePromptModeration/SimplePromptModerationRunner.ts
+++ b/packages/components/nodes/moderation/SimplePromptModeration/SimplePromptModerationRunner.ts
@@ -1,5 +1,4 @@
 import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'
 
 export class SimplePromptModerationRunner implements Moderation {
     private readonly denyList: string = ''
@@ -13,7 +12,7 @@ export class SimplePromptModerationRunner implements Moderation {
         this.moderationErrorMessage = moderationErrorMessage
     }
 
-    async checkForViolations(_: BaseLanguageModel, input: string): Promise<string> {
+    async checkForViolations(input: string): Promise<string> {
         this.denyList.split('\n').forEach((denyListItem) => {
             if (denyListItem && denyListItem !== '' && input.includes(denyListItem)) {
                 throw Error(this.moderationErrorMessage)
diff --git a/packages/components/nodes/outputparsers/OutputParserHelpers.ts b/packages/components/nodes/outputparsers/OutputParserHelpers.ts
index a94edddd3d7..8ea77e6bf67 100644
--- a/packages/components/nodes/outputparsers/OutputParserHelpers.ts
+++ b/packages/components/nodes/outputparsers/OutputParserHelpers.ts
@@ -1,6 +1,6 @@
 import { BaseOutputParser } from 'langchain/schema/output_parser'
 import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
 import { ICommonObject } from '../../src'
 import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
 
@@ -15,7 +15,7 @@ export const formatResponse = (response: string | object): string | object => {
 
 export const injectOutputParser = (
     outputParser: BaseOutputParser,
-    chain: LLMChain,
+    chain: LLMChain<string, BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
     promptValues: ICommonObject | undefined = undefined
 ) => {
     if (outputParser && chain.prompt) {
diff --git a/packages/components/nodes/vectorstores/InMemory/InMemoryVectorStore.ts b/packages/components/nodes/vectorstores/InMemory/InMemoryVectorStore.ts
index 51394613e42..620c3af7f99 100644
--- a/packages/components/nodes/vectorstores/InMemory/InMemoryVectorStore.ts
+++ b/packages/components/nodes/vectorstores/InMemory/InMemoryVectorStore.ts
@@ -31,7 +31,8 @@ class InMemoryVectorStore_VectorStores implements INode {
                 label: 'Document',
                 name: 'document',
                 type: 'Document',
-                list: true
+                list: true,
+                optional: true
             },
             {
                 label: 'Embeddings',
diff --git a/packages/components/package.json b/packages/components/package.json
index 1d4cea573a2..a965a0c7dc2 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -22,7 +22,8 @@
         "@dqbd/tiktoken": "^1.0.7",
         "@elastic/elasticsearch": "^8.9.0",
         "@getzep/zep-js": "^0.6.3",
-        "@gomomento/sdk": "^1.40.2",
+        "@gomomento/sdk": "^1.51.1",
+        "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
         "@notionhq/client": "^2.2.8",
@@ -49,7 +50,7 @@
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",
         "ioredis": "^5.3.2",
-        "langchain": "^0.0.165",
+        "langchain": "^0.0.196",
         "langfuse-langchain": "^1.0.31",
         "langsmith": "^0.0.32",
         "linkifyjs": "^4.1.1",
diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts
index 86d626c44ef..9dbb695eebf 100644
--- a/packages/server/src/utils/index.ts
+++ b/packages/server/src/utils/index.ts
@@ -844,7 +844,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
  */
 export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
     const streamAvailableLLMs = {
-        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama'],
+        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
         LLMs: ['azureOpenAI', 'openAI', 'ollama']
     }
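
Illustrative usage sketch (not part of the patch above): after this refactor, moderation runners no longer receive the chat model. The OpenAI runner now takes its API key through its constructor, and checkInputs is called with only the moderation list and the user input. The TypeScript below mirrors the Moderation contract and the deny-list runner from the diff; the caller at the end is a hypothetical example, not code from the repo.

// Sketch of the refactored moderation contract (mirrors Moderation.ts and SimplePromptModerationRunner.ts above)
export abstract class Moderation {
    abstract checkForViolations(input: string): Promise<string>
}

// Runs every configured moderation in order; a runner rejects the input by throwing
export const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
    for (const moderation of inputModerations) {
        input = await moderation.checkForViolations(input)
    }
    return input
}

export class SimplePromptModerationRunner implements Moderation {
    constructor(private readonly denyList: string, private readonly moderationErrorMessage: string) {}

    async checkForViolations(input: string): Promise<string> {
        for (const denyListItem of this.denyList.split('\n')) {
            if (denyListItem !== '' && input.includes(denyListItem)) {
                throw Error(this.moderationErrorMessage)
            }
        }
        return input
    }
}

// Hypothetical caller: no BaseLanguageModel is passed anymore
const moderations: Moderation[] = [new SimplePromptModerationRunner('ignore all previous instructions', 'Cannot process your request.')]
checkInputs(moderations, 'What is AWS Bedrock?').then((safeInput) => console.log(safeInput))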