From 45cec31b86242c50c5b592a235a1a0ad4bed3325 Mon Sep 17 00:00:00 2001
From: Zhijie He <hezhijie0327@hotmail.com>
Date: Mon, 2 Sep 2024 16:28:12 +0800
Subject: [PATCH 1/5] =?UTF-8?q?=E2=9C=A8=20feat:=20add=20Ai21Labs=20model?=
 =?UTF-8?q?=20provider?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Dockerfile                                  |   2 +
 Dockerfile.database                         |   2 +
 .../settings/llm/ProviderList/providers.tsx |   2 +
 src/app/api/chat/agentRuntime.ts            |   7 +
 src/config/llm.ts                           |   6 +
 src/config/modelProviders/ai21.ts           |  27 ++
 src/config/modelProviders/index.ts          |   4 +
 src/const/settings/llm.ts                   |   5 +
 .../Error/APIKeyForm/ProviderAvatar.tsx     |   5 +
 src/libs/agent-runtime/AgentRuntime.ts      |   7 +
 src/libs/agent-runtime/ai21/index.test.ts   | 255 ++++++++++++++++++
 src/libs/agent-runtime/ai21/index.ts        |  18 ++
 src/libs/agent-runtime/types/type.ts        |   1 +
 src/server/globalConfig/index.ts            |   2 +
 src/types/user/settings/keyVaults.ts        |   1 +
 15 files changed, 344 insertions(+)
 create mode 100644 src/config/modelProviders/ai21.ts
 create mode 100644 src/libs/agent-runtime/ai21/index.test.ts
 create mode 100644 src/libs/agent-runtime/ai21/index.ts

diff --git a/Dockerfile b/Dockerfile
index 43fc76e605b7d..2305463a98932 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -104,6 +104,8 @@ ENV ACCESS_CODE="" \
 
 # Model Variables
 ENV \
+    # AI21
+    AI21_API_KEY="" \
     # Ai360
     AI360_API_KEY="" \
     # Anthropic
diff --git a/Dockerfile.database b/Dockerfile.database
index 37645f9514fcd..848252b9e5e5f 100644
--- a/Dockerfile.database
+++ b/Dockerfile.database
@@ -136,6 +136,8 @@ ENV NEXT_PUBLIC_S3_DOMAIN="" \
 
 # Model Variables
 ENV \
+    # AI21
+    AI21_API_KEY="" \
     # Ai360
     AI360_API_KEY="" \
     # Anthropic
diff --git a/src/app/(main)/settings/llm/ProviderList/providers.tsx b/src/app/(main)/settings/llm/ProviderList/providers.tsx
index 645c7d3367966..69e8ba83d74a7 100644
--- a/src/app/(main)/settings/llm/ProviderList/providers.tsx
+++ b/src/app/(main)/settings/llm/ProviderList/providers.tsx
@@ -1,6 +1,7 @@
 import { useMemo } from 'react';
 
 import {
+  Ai21ProviderCard,
   Ai360ProviderCard,
   AnthropicProviderCard,
   BaichuanProviderCard,
@@ -61,6 +62,7 @@ export const useProviderList = (): ProviderItem[] => {
       Ai360ProviderCard,
       SiliconCloudProviderCard,
       UpstageProviderCard,
+      Ai21ProviderCard,
     ],
     [AzureProvider, OllamaProvider, OpenAIProvider, BedrockProvider],
   );
diff --git a/src/app/api/chat/agentRuntime.ts b/src/app/api/chat/agentRuntime.ts
index 05571c96ef0e9..779e9f40213de 100644
--- a/src/app/api/chat/agentRuntime.ts
+++ b/src/app/api/chat/agentRuntime.ts
@@ -213,6 +213,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
 
       const apiKey = apiKeyManager.pick(payload?.apiKey || UPSTAGE_API_KEY);
 
+      return { apiKey };
+    }
+    case ModelProvider.Ai21: {
+      const { AI21_API_KEY } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || AI21_API_KEY);
+
       return { apiKey };
     }
   }
diff --git a/src/config/llm.ts b/src/config/llm.ts
index a477eeaf92261..f286ca09bcba6 100644
--- a/src/config/llm.ts
+++ b/src/config/llm.ts
@@ -100,6 +100,9 @@ export const getLLMConfig = () => {
 
       ENABLED_UPSTAGE: z.boolean(),
       UPSTAGE_API_KEY: z.string().optional(),
+
+      ENABLED_AI21: z.boolean(),
+      AI21_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -197,6 +200,9 @@ export const getLLMConfig = () => {
 
       ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
       UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
+
+      ENABLED_AI21: !!process.env.AI21_API_KEY,
+      AI21_API_KEY: process.env.AI21_API_KEY,
     },
   });
 };
diff --git a/src/config/modelProviders/ai21.ts b/src/config/modelProviders/ai21.ts
new file mode 100644
index 0000000000000..bcb868c1a67cb
--- /dev/null
+++ b/src/config/modelProviders/ai21.ts
@@ -0,0 +1,27 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref https://docs.ai21.com/reference/jamba-15-api-ref
+const Ai21: ModelProviderCard = {
+  chatModels: [
+    {
+      displayName: 'Jamba 1.5 Mini',
+      enabled: true,
+      functionCall: true,
+      id: 'jamba-1.5-mini',
+      tokens: 256_000,
+    },
+    {
+      displayName: 'Jamba 1.5 Large',
+      enabled: true,
+      functionCall: true,
+      id: 'jamba-1.5-large',
+      tokens: 256_000,
+    },
+  ],
+  checkModel: 'jamba-1.5-mini',
+  id: 'ai21',
+  modelList: { showModelFetcher: true },
+  name: 'Ai21Labs',
+};
+
+export default Ai21;
diff --git a/src/config/modelProviders/index.ts b/src/config/modelProviders/index.ts
index f2f382f17b9b1..b25f765f04370 100644
--- a/src/config/modelProviders/index.ts
+++ b/src/config/modelProviders/index.ts
@@ -1,5 +1,6 @@
 import { ChatModelCard, ModelProviderCard } from '@/types/llm';
 
+import Ai21Provider from './ai21';
 import Ai360Provider from './ai360';
 import AnthropicProvider from './anthropic';
 import AzureProvider from './azure';
@@ -49,6 +50,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   Ai360Provider.chatModels,
   SiliconCloudProvider.chatModels,
   UpstageProvider.chatModels,
+  Ai21Provider.chatModels,
 ].flat();
 
 export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -76,6 +78,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   Ai360Provider,
   SiliconCloudProvider,
   UpstageProvider,
+  Ai21Provider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -87,6 +90,7 @@ export const isProviderDisableBroswerRequest = (id: string) => {
   return !!provider;
 };
 
+export { default as Ai21ProviderCard } from './ai21';
 export { default as Ai360ProviderCard } from './ai360';
 export { default as AnthropicProviderCard } from './anthropic';
 export { default as AzureProviderCard } from './azure';
diff --git a/src/const/settings/llm.ts b/src/const/settings/llm.ts
index 6056265a074f2..d419eb0141555 100644
--- a/src/const/settings/llm.ts
+++ b/src/const/settings/llm.ts
@@ -1,4 +1,5 @@
 import {
+  Ai21ProviderCard,
   Ai360ProviderCard,
   AnthropicProviderCard,
   BaichuanProviderCard,
@@ -28,6 +29,10 @@ import { ModelProvider } from '@/libs/agent-runtime';
 import { UserModelProviderConfig } from '@/types/user/settings';
 
 export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
+  ai21: {
+    enabled: false,
+    enabledModels: filterEnabledModels(Ai21ProviderCard),
+  },
   ai360: {
     enabled: false,
     enabledModels: filterEnabledModels(Ai360ProviderCard),
diff --git a/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx b/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
index 1b6f7ee60fac1..60b6c2627fb81 100644
--- a/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
+++ b/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
@@ -1,4 +1,5 @@
 import {
+  Ai21,
   Ai360,
   AiMass,
   Anthropic,
@@ -108,6 +109,10 @@ const ProviderAvatar = memo<ProviderAvatarProps>(({ provider }) => {
       return <Upstage color={Upstage.colorPrimary} size={56} />;
     }
 
+    case ModelProvider.Ai21: {
+      return <Ai21 color={Ai21.colorPrimary} size={56} />;
+    }
+
     default:
     case ModelProvider.OpenAI: {
       return <OpenAI color={theme.colorText} size={64} />;
diff --git a/src/libs/agent-runtime/AgentRuntime.ts b/src/libs/agent-runtime/AgentRuntime.ts
index 0454e3bbde0a2..12ddb3b9b4f0c 100644
--- a/src/libs/agent-runtime/AgentRuntime.ts
+++ b/src/libs/agent-runtime/AgentRuntime.ts
@@ -3,6 +3,7 @@ import { ClientOptions } from 'openai';
 import type { TracePayload } from '@/const/trace';
 
 import { LobeRuntimeAI } from './BaseAI';
+import { LobeAi21AI } from './ai21';
 import { LobeAi360AI } from './ai360';
 import { LobeAnthropicAI } from './anthropic';
 import { LobeAzureOpenAI } from './azureOpenai';
@@ -114,6 +115,7 @@ class AgentRuntime {
   static async initializeWithProviderOptions(
     provider: string,
     params: Partial<{
+      ai21: Partial<ClientOptions>;
       ai360: Partial<ClientOptions>;
       anthropic: Partial<ClientOptions>;
       azure: { apiVersion?: string; apikey?: string; endpoint?: string };
@@ -268,6 +270,11 @@ class AgentRuntime {
         runtimeModel = new LobeUpstageAI(params.upstage);
         break
       }
+
+      case ModelProvider.Ai21: {
+        runtimeModel = new LobeAi21AI(params.ai21);
+        break
+      }
     }
 
     return new AgentRuntime(runtimeModel);
diff --git a/src/libs/agent-runtime/ai21/index.test.ts b/src/libs/agent-runtime/ai21/index.test.ts
new file mode 100644
index 0000000000000..9b229ddd7c978
--- /dev/null
+++ b/src/libs/agent-runtime/ai21/index.test.ts
@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeAi21AI } from './index';
+
+const provider = ModelProvider.Ai21;
+const defaultBaseURL = 'https://api.ai21.com/studio/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeAi21AI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeAi21AI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeAi21AI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeAi21AI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'jamba-1.5-mini',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeAi21AI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'jamba-1.5-mini',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeAi21AI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'jamba-1.5-mini',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidAi21APIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'jamba-1.5-mini',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidAi21APIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'jamba-1.5-mini',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_AI21_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // mocked prod stream
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // add a toReadableStream method
+
+        // Mock the return value of chat.completions.create, including a mocked tee method
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // Save the original environment variable value
+        const originalDebugValue = process.env.DEBUG_AI21_CHAT_COMPLETION;
+
+        // Mock the environment variable
+        process.env.DEBUG_AI21_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // Run the test
+        // Call the function under test and make sure it invokes debugStream when the condition is met
+        // This is a hypothetical test call; adjust it to the actual setup if needed
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'jamba-1.5-mini',
+          stream: true,
+          temperature: 0,
+        });
+
+        // Verify that debugStream was called
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // Restore the original environment variable value
+        process.env.DEBUG_AI21_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
diff --git a/src/libs/agent-runtime/ai21/index.ts b/src/libs/agent-runtime/ai21/index.ts
new file mode 100644
index 0000000000000..67089341b7c04
--- /dev/null
+++ b/src/libs/agent-runtime/ai21/index.ts
@@ -0,0 +1,18 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeAi21AI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://api.ai21.com/studio/v1',
+  chatCompletion: {
+    handlePayload: (payload) => {
+      return {
+        ...payload,
+        stream: !payload.tools,
+      } as any;
+    },
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_AI21_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.Ai21,
+});
diff --git a/src/libs/agent-runtime/types/type.ts b/src/libs/agent-runtime/types/type.ts
index 8c0999f9c1202..2182854e68cdf 100644
--- a/src/libs/agent-runtime/types/type.ts
+++ b/src/libs/agent-runtime/types/type.ts
@@ -22,6 +22,7 @@ export interface CreateChatCompletionOptions {
 }
 
 export enum ModelProvider {
+  Ai21 = 'ai21',
   Ai360 = 'ai360',
   Anthropic = 'anthropic',
   Azure = 'azure',
diff --git a/src/server/globalConfig/index.ts b/src/server/globalConfig/index.ts
index 6f4a9dd3c8282..5f391c6052ecc 100644
--- a/src/server/globalConfig/index.ts
+++ b/src/server/globalConfig/index.ts
@@ -54,6 +54,7 @@ export const getServerGlobalConfig = () => {
     ENABLED_STEPFUN,
     ENABLED_BAICHUAN,
     ENABLED_TAICHU,
+    ENABLED_AI21,
     ENABLED_AI360,
 
     ENABLED_SILICONCLOUD,
@@ -86,6 +87,7 @@ export const getServerGlobalConfig = () => {
     enabledAccessCode: ACCESS_CODES?.length > 0,
     enabledOAuthSSO: enableNextAuth,
     languageModel: {
+      ai21: { enabled: ENABLED_AI21 },
       ai360: { enabled: ENABLED_AI360 },
       anthropic: {
         enabled: ENABLED_ANTHROPIC,
diff --git a/src/types/user/settings/keyVaults.ts b/src/types/user/settings/keyVaults.ts
index 523a1e8aa0c5f..aa8d5479b2dba 100644
--- a/src/types/user/settings/keyVaults.ts
+++ b/src/types/user/settings/keyVaults.ts
@@ -16,6 +16,7 @@ export interface AWSBedrockKeyVault {
 }
 
 export interface UserKeyVaults {
+  ai21?: OpenAICompatibleKeyVault;
   ai360?: OpenAICompatibleKeyVault;
   anthropic?: OpenAICompatibleKeyVault;
   azure?: AzureOpenAIKeyVault;

From a8661be28b344e30753585a0e552a01c77813d16 Mon Sep 17 00:00:00 2001
From: Zhijie He <hezhijie0327@hotmail.com>
Date: Tue, 10 Sep 2024 23:12:22 +0800
Subject: [PATCH 2/5] =?UTF-8?q?=F0=9F=94=A8=20chore:=20fix=20rebase=20conf?=
 =?UTF-8?q?licts?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../Error/APIKeyForm/ProviderAvatar.tsx | 123 ------------------
 1 file changed, 123 deletions(-)
 delete mode 100644 src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx

diff --git a/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx b/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
deleted file mode 100644
index 60b6c2627fb81..0000000000000
--- a/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
+++ /dev/null
@@ -1,123 +0,0 @@
-import {
-  Ai21,
-  Ai360,
-  AiMass,
-  Anthropic,
-  Baichuan,
-  DeepSeek,
-  Google,
-  Groq,
-  Minimax,
-  Mistral,
-  Moonshot,
-  Novita,
-  OpenAI,
-  OpenRouter,
-  Perplexity,
-  Stepfun,
-  Together,
-  Tongyi,
-  Upstage,
-  ZeroOne,
-  Zhipu,
-} from '@lobehub/icons';
-import { useTheme } from 'antd-style';
-import { memo } from 'react';
-
-import { ModelProvider } from '@/libs/agent-runtime';
-
-interface ProviderAvatarProps {
-  provider: ModelProvider;
-}
-
-const ProviderAvatar = memo<ProviderAvatarProps>(({ provider }) => {
-  const theme = useTheme();
-
-  switch (provider as ModelProvider) {
-    case ModelProvider.Google: {
-      return <Google.Color size={56} />;
-    }
-
-    case ModelProvider.ZhiPu: {
-      return <Zhipu.Color size={64} />;
-    }
-
-    case ModelProvider.Minimax: {
-      return <Minimax.Color size={56} />;
-    }
-
-    case ModelProvider.Mistral: {
-      return <Mistral.Color size={56} />;
-    }
-
-    case ModelProvider.Moonshot: {
-      return <Moonshot size={56} />;
-    }
-
-    case ModelProvider.Perplexity: {
-      return <Perplexity.Color size={56} />;
-    }
-
-    case ModelProvider.Anthropic: {
-      return <Anthropic color={Anthropic.colorPrimary} size={52} />;
-    }
-
-    case ModelProvider.Baichuan: {
-      return <Baichuan color={Baichuan.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.DeepSeek: {
-      return <DeepSeek color={DeepSeek.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.Groq: {
-      return <Groq color={Groq.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.OpenRouter: {
-      return <OpenRouter color={OpenRouter.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.Qwen: {
-      return <Tongyi color={Tongyi.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.Stepfun: {
-      return <Stepfun color={Stepfun.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.Taichu: {
-      return <AiMass.Color size={56} />;
-    }
-
-    case ModelProvider.TogetherAI: {
-      return <Together color={Together.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.ZeroOne: {
-      return <ZeroOne color={ZeroOne.colorPrimary} size={56} />;
-    }
-    case ModelProvider.Novita: {
-      return <Novita color={Novita.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.Ai360: {
-      return <Ai360 color={Ai360.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.Upstage: {
-      return <Upstage color={Upstage.colorPrimary} size={56} />;
-    }
-
-    case ModelProvider.Ai21: {
-      return <Ai21 color={Ai21.colorPrimary} size={56} />;
-    }
-
-    default:
-    case ModelProvider.OpenAI: {
-      return <OpenAI color={theme.colorText} size={64} />;
-    }
-  }
-});
-
-export default ProviderAvatar;

From b73852aa96a438ac060eb853c410b39c3513e1cb Mon Sep 17 00:00:00 2001
From: Zhijie He <hezhijie0327@hotmail.com>
Date: Wed, 11 Sep 2024 09:13:12 +0800
Subject: [PATCH 3/5] =?UTF-8?q?=F0=9F=90=9B=20fix:=20fix=20CI=20error?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/libs/agent-runtime/AgentRuntime.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/libs/agent-runtime/AgentRuntime.ts b/src/libs/agent-runtime/AgentRuntime.ts
index 204977cc8e13d..b58fcece2df92 100644
--- a/src/libs/agent-runtime/AgentRuntime.ts
+++ b/src/libs/agent-runtime/AgentRuntime.ts
@@ -289,6 +289,7 @@ class AgentRuntime {
         runtimeModel = new LobeAi21AI(params.ai21);
         break;
       }
+    }
 
     return new AgentRuntime(runtimeModel);
   }

From 825bf931115b5d4cdca2d379264cd99f0bfbfb4c Mon Sep 17 00:00:00 2001
From: Zhijie He <hezhijie0327@hotmail.com>
Date: Thu, 12 Sep 2024 13:31:55 +0800
Subject: [PATCH 4/5] =?UTF-8?q?=F0=9F=92=84=20style:=20add=20model=20price?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/config/modelProviders/ai21.ts | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/config/modelProviders/ai21.ts b/src/config/modelProviders/ai21.ts
index bcb868c1a67cb..f2ce25c9ca53f 100644
--- a/src/config/modelProviders/ai21.ts
+++ b/src/config/modelProviders/ai21.ts
@@ -8,6 +8,10 @@ const Ai21: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'jamba-1.5-mini',
+      pricing: {
+        input: 0.2,
+        output: 0.4,
+      },
       tokens: 256_000,
     },
     {
@@ -15,13 +19,19 @@ const Ai21: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'jamba-1.5-large',
+      pricing: {
+        input: 2,
+        output: 8,
+      },
       tokens: 256_000,
     },
   ],
   checkModel: 'jamba-1.5-mini',
   id: 'ai21',
+  modelsUrl: 'https://docs.ai21.com/reference',
   modelList: { showModelFetcher: true },
   name: 'Ai21Labs',
+  url: 'https://studio.ai21.com',
 };
 
 export default Ai21;

From dc159aaf6a7ce8452de51a74691773bc59a72004 Mon Sep 17 00:00:00 2001
From: Zhijie He <hezhijie0327@hotmail.com>
Date: Thu, 12 Sep 2024 15:32:49 +0800
Subject: [PATCH 5/5] =?UTF-8?q?=F0=9F=90=9B=20fix:=20fix=20CI=20error?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/config/modelProviders/ai21.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/config/modelProviders/ai21.ts b/src/config/modelProviders/ai21.ts
index f2ce25c9ca53f..809f2b28b2b58 100644
--- a/src/config/modelProviders/ai21.ts
+++ b/src/config/modelProviders/ai21.ts
@@ -28,8 +28,8 @@ const Ai21: ModelProviderCard = {
   ],
   checkModel: 'jamba-1.5-mini',
   id: 'ai21',
-  modelsUrl: 'https://docs.ai21.com/reference',
   modelList: { showModelFetcher: true },
+  modelsUrl: 'https://docs.ai21.com/reference',
   name: 'Ai21Labs',
   url: 'https://studio.ai21.com',
 };
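
Reviewer note (appended for review, not part of the patch series): a minimal sketch of how the new runtime could be smoke-tested locally once the series is applied. It only uses surfaces the patches themselves add (the LobeAi21AI factory instance, the jamba-1.5-mini model id, and the AI21_API_KEY variable from PATCH 1/5); the file name and call site are hypothetical.

// smoke-ai21.ts — illustrative only, assuming the repo's "@/" path alias for src/
import { LobeAi21AI } from '@/libs/agent-runtime/ai21';

const main = async () => {
  // The constructor rejects a missing key with InvalidProviderAPIKey (see index.test.ts),
  // so AI21_API_KEY must be set in the environment for this to run.
  const runtime = new LobeAi21AI({ apiKey: process.env.AI21_API_KEY });

  // handlePayload in src/libs/agent-runtime/ai21/index.ts sets `stream: !payload.tools`,
  // so a request without tools is sent as a streaming chat completion.
  const response = await runtime.chat({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'jamba-1.5-mini',
    temperature: 0,
  });

  console.log(response);
};

main();

The `stream: !payload.tools` override falls back to a non-streaming request whenever tools are supplied, presumably because the upstream API does not stream tool calls; that rationale is an inference, not something stated in the patches.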