diff --git a/cortex-js/package.json b/cortex-js/package.json
index 8f433f591..901a11c5c 100644
--- a/cortex-js/package.json
+++ b/cortex-js/package.json
@@ -53,7 +53,6 @@
     "decompress": "^4.2.1",
     "js-yaml": "^4.1.0",
     "nest-commander": "^3.13.0",
-    "openai": "^4.50.0",
     "readline": "^1.3.0",
     "reflect-metadata": "^0.2.0",
     "rxjs": "^7.8.1",
@@ -81,6 +80,7 @@
     "@typescript-eslint/eslint-plugin": "^6.0.0",
     "@typescript-eslint/parser": "^6.0.0",
     "bun": "^1.1.15",
+    "cortexso-node": "^0.0.4",
     "cpx": "^1.5.0",
     "eslint": "^8.42.0",
     "eslint-config-prettier": "^9.0.0",
diff --git a/cortex-js/src/domain/models/assistant.interface.ts b/cortex-js/src/domain/models/assistant.interface.ts
index 73b1f454b..0af7f9ad6 100644
--- a/cortex-js/src/domain/models/assistant.interface.ts
+++ b/cortex-js/src/domain/models/assistant.interface.ts
@@ -1,5 +1,5 @@
-import { Assistant as OpenAiAssistant } from 'openai/resources/beta/assistants';
-import { AssistantResponseFormatOption as OpenAIAssistantResponseFormatOption } from 'openai/resources/beta/threads/threads';
+import { Assistant as OpenAiAssistant } from 'cortexso-node/resources/beta/assistants';
+import { AssistantResponseFormatOption as OpenAIAssistantResponseFormatOption } from 'cortexso-node/resources/beta/threads/threads';
 
 export interface Assistant extends OpenAiAssistant {
   avatar?: string;
diff --git a/cortex-js/src/domain/models/message.interface.ts b/cortex-js/src/domain/models/message.interface.ts
index 86bf82717..1de12d8a2 100644
--- a/cortex-js/src/domain/models/message.interface.ts
+++ b/cortex-js/src/domain/models/message.interface.ts
@@ -2,7 +2,7 @@ import {
   Message as OpenAiMessage,
   MessageContent as OpenAiMessageContent,
   TextContentBlock as OpenAiTextContentBlock,
-} from 'openai/resources/beta/threads/messages';
+} from 'cortexso-node/resources/beta/threads/messages';
 
 export interface Message extends OpenAiMessage {}
diff --git a/cortex-js/src/domain/models/model.interface.ts b/cortex-js/src/domain/models/model.interface.ts
index fdf1699da..37119a28f 100644
--- a/cortex-js/src/domain/models/model.interface.ts
+++ b/cortex-js/src/domain/models/model.interface.ts
@@ -1,4 +1,4 @@
-import { Model as OpenAiModel } from 'openai/resources/models';
+import { Model as OpenAiModel } from 'cortexso-node/resources/models';
 
 export interface Model
   extends OpenAiModel,
diff --git a/cortex-js/src/domain/models/thread.interface.ts b/cortex-js/src/domain/models/thread.interface.ts
index 20dc7d3be..583f7201d 100644
--- a/cortex-js/src/domain/models/thread.interface.ts
+++ b/cortex-js/src/domain/models/thread.interface.ts
@@ -1,4 +1,4 @@
-import { Thread as OpenAiThread } from 'openai/resources/beta/threads/threads';
+import { Thread as OpenAiThread } from 'cortexso-node/resources/beta/threads/threads';
 import { Assistant } from './assistant.interface';
 
 export interface ThreadToolResources extends OpenAiThread.ToolResources {}
diff --git a/cortex-js/src/infrastructure/commanders/types/benchmark-config.interface.ts b/cortex-js/src/infrastructure/commanders/types/benchmark-config.interface.ts
index d3117a744..b953d253e 100644
--- a/cortex-js/src/infrastructure/commanders/types/benchmark-config.interface.ts
+++ b/cortex-js/src/infrastructure/commanders/types/benchmark-config.interface.ts
@@ -1,4 +1,4 @@
-import { ChatCompletionMessageParam } from 'openai/resources';
+import { ChatCompletionMessageParam } from 'cortexso-node/resources';
 
 export interface ApiConfig {
   base_url: string;
diff --git a/cortex-js/src/infrastructure/commanders/usecases/benchmark.cli.usecases.ts b/cortex-js/src/infrastructure/commanders/usecases/benchmark.cli.usecases.ts
index ea7cba78e..9bdaeaad1 100644
--- a/cortex-js/src/infrastructure/commanders/usecases/benchmark.cli.usecases.ts
+++ b/cortex-js/src/infrastructure/commanders/usecases/benchmark.cli.usecases.ts
@@ -1,7 +1,7 @@
 import { Injectable } from '@nestjs/common';
 import si from 'systeminformation';
 import fs, { existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs';
-import OpenAI from 'openai';
+import Cortex from 'cortexso-node';
 import { Presets, SingleBar } from 'cli-progress';
 import yaml from 'js-yaml';
 import { FileManagerService } from '@/infrastructure/services/file-manager/file-manager.service';
@@ -27,7 +27,7 @@ export class BenchmarkCliUsecases {
   ) {}
 
   config: BenchmarkConfig;
-  openai?: OpenAI;
+  cortexClient?: Cortex;
   /**
    * Benchmark and analyze the performance of a specific AI model using a variety of system resources
    */
@@ -43,7 +43,7 @@
     const model = params?.model ?? this.config.api.parameters.model;
 
     // TODO: Using OpenAI client or Cortex client to benchmark?
-    this.openai = new OpenAI({
+    this.cortexClient = new Cortex({
      apiKey: this.config.api.api_key,
      baseURL: this.config.api.base_url,
      timeout: 20 * 1000,
@@ -60,11 +60,7 @@
       .then(() =>
         this.psUsecases
           .getModels()
-          .then((models) =>
-            models.find(
-              (e) => e.modelId === model,
-            ),
-          ),
+          .then((models) => models.find((e) => e.modelId === model)),
       )
       .then((model) => {
         if (!model)
@@ -147,7 +143,7 @@
 
     let firstTokenTime = null;
     try {
-      const stream = await this.openai!.chat.completions.create({
+      const stream = await this.cortexClient!.chat.completions.create({
         model: this.config.api.parameters.model,
         messages: this.config.api.parameters.messages,
         max_tokens: this.config.api.parameters.max_tokens,
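Reviewer note: this diff treats cortexso-node as a drop-in replacement for the openai Node SDK; every hunk is a pure import-path or identifier rename, and the constructor options (apiKey, baseURL, timeout) and the streaming chat.completions.create call carry over unchanged. Below is a minimal standalone sketch of the swapped client, assuming the package mirrors the openai@4 surface as these rewrites imply. The base URL and model id are placeholders for illustration, not values taken from this diff.

import Cortex from 'cortexso-node';

async function smokeTest() {
  // Same constructor options the benchmark use case passes from its config.
  const cortex = new Cortex({
    apiKey: 'unused',                    // placeholder; stands in for config.api.api_key
    baseURL: 'http://127.0.0.1:1337/v1', // placeholder; stands in for config.api.base_url
    timeout: 20 * 1000,
  });

  // Streaming chat completion, identical call shape to the openai SDK.
  const stream = await cortex.chat.completions.create({
    model: 'my-model',                   // placeholder model id
    messages: [{ role: 'user', content: 'Say hello.' }],
    max_tokens: 64,
    stream: true,
  });

  // Chunks arrive as ChatCompletionChunk objects; print the delta text.
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  }
}

smokeTest().catch(console.error);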