diff --git a/app/client/anthropic/config.ts b/app/client/anthropic/config.ts
new file mode 100644
index 00000000000..1ba8f4f0003
--- /dev/null
+++ b/app/client/anthropic/config.ts
@@ -0,0 +1,29 @@
+export const AnthropicConfig = {
+  model: {
+    model: "claude-instant-1",
+    summarizeModel: "claude-instant-1",
+
+    max_tokens_to_sample: 8192,
+    temperature: 0.5,
+    top_p: 0.7,
+    top_k: 5,
+  },
+  provider: {
+    name: "Anthropic" as const,
+    endpoint: "https://api.anthropic.com",
+    apiKey: "",
+    customModels: "",
+    version: "2023-06-01",
+
+    models: [
+      {
+        name: "claude-instant-1",
+        available: true,
+      },
+      {
+        name: "claude-2",
+        available: true,
+      },
+    ],
+  },
+};
diff --git a/app/client/anthropic/index.ts b/app/client/anthropic/index.ts
new file mode 100644
index 00000000000..863fde951b5
--- /dev/null
+++ b/app/client/anthropic/index.ts
@@ -0,0 +1,233 @@
+import { ModelConfig, ProviderConfig } from "@/app/store";
+import { createLogger } from "@/app/utils/log";
+import { getAuthKey } from "../common/auth";
+import { API_PREFIX, AnthropicPath, ApiPath } from "@/app/constant";
+import { getApiPath } from "@/app/utils/path";
+import { trimEnd } from "@/app/utils/string";
+import { Anthropic } from "./types";
+import { ChatOptions, LLMModel, LLMUsage, RequestMessage } from "../types";
+import { omit } from "@/app/utils/object";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import Locale from "@/app/locales";
+import { AnthropicConfig } from "./config";
+
+export function createAnthropicClient(
+  providerConfigs: ProviderConfig,
+  modelConfig: ModelConfig,
+) {
+  const anthropicConfig = { ...providerConfigs.anthropic };
+  const logger = createLogger("[Anthropic]");
+  const anthropicModelConfig = { ...modelConfig.anthropic };
+
+  return {
+    headers() {
+      return {
+        "Content-Type": "application/json",
+        "x-api-key": getAuthKey(anthropicConfig.apiKey),
+        "anthropic-version": anthropicConfig.version,
+      };
+    },
+
+    path(path: AnthropicPath): string {
+      let baseUrl: string = anthropicConfig.endpoint;
+
+      // if endpoint is empty, use default endpoint
+      if (baseUrl.trim().length === 0) {
+        baseUrl = getApiPath(ApiPath.Anthropic);
+      }
+
+      if (!baseUrl.startsWith("http") && !baseUrl.startsWith(API_PREFIX)) {
+        baseUrl = "https://" + baseUrl;
+      }
+
+      baseUrl = trimEnd(baseUrl, "/");
+
+      return `${baseUrl}/${path}`;
+    },
+
+    extractMessage(res: Anthropic.ChatResponse) {
+      return res.completion;
+    },
+
+    beforeRequest(options: ChatOptions, stream = false) {
+      const ClaudeMapper: Record<RequestMessage["role"], string> = {
+        assistant: "Assistant",
+        user: "Human",
+        system: "Human",
+      };
+
+      const prompt = options.messages
+        .map((v) => ({
+          role: ClaudeMapper[v.role] ?? "Human",
+          content: v.content,
+        }))
+        .map((v) => `\n\n${v.role}: ${v.content}`)
+        .join("");
+
+      if (options.shouldSummarize) {
+        anthropicModelConfig.model = anthropicModelConfig.summarizeModel;
+      }
+
+      const requestBody: Anthropic.ChatRequest = {
+        prompt,
+        stream,
+        ...omit(anthropicModelConfig, "summarizeModel"),
+      };
+
+      const path = this.path(AnthropicPath.Chat);
+
+      logger.log("path = ", path, requestBody);
+
+      const controller = new AbortController();
+      options.onController?.(controller);
+
+      const payload = {
+        method: "POST",
+        body: JSON.stringify(requestBody),
+        signal: controller.signal,
+        headers: this.headers(),
+        mode: "no-cors" as RequestMode,
+      };
+
+      return {
+        path,
+        payload,
+        controller,
+      };
+    },
+
+    async chat(options: ChatOptions) {
+      try {
+        const { path, payload, controller } = this.beforeRequest(
+          options,
+          false,
+        );
+
+        controller.signal.onabort = () => options.onFinish("");
+
+        const res = await fetch(path, payload);
+        const resJson = await res.json();
+
+        const message = this.extractMessage(resJson);
+        options.onFinish(message);
+      } catch (e) {
+        logger.error("failed to chat", e);
+        options.onError?.(e as Error);
+      }
+    },
+
+    async chatStream(options: ChatOptions) {
+      try {
+        const { path, payload, controller } = this.beforeRequest(options, true);
+
+        const context = {
+          text: "",
+          finished: false,
+        };
+
+        const finish = () => {
+          if (!context.finished) {
+            options.onFinish(context.text);
+            context.finished = true;
+          }
+        };
+
+        controller.signal.onabort = finish;
+
+        logger.log(payload);
+
+        fetchEventSource(path, {
+          ...payload,
+          async onopen(res) {
+            const contentType = res.headers.get("content-type");
+            logger.log("response content type: ", contentType);
+
+            if (contentType?.startsWith("text/plain")) {
+              context.text = await res.clone().text();
+              return finish();
+            }
+
+            if (
+              !res.ok ||
+              !res.headers
+                .get("content-type")
+                ?.startsWith(EventStreamContentType) ||
+              res.status !== 200
+            ) {
+              const responseTexts = [context.text];
+              let extraInfo = await res.clone().text();
+              try {
+                const resJson = await res.clone().json();
+                extraInfo = prettyObject(resJson);
+              } catch {}
+
+              if (res.status === 401) {
+                responseTexts.push(Locale.Error.Unauthorized);
+              }
+
+              if (extraInfo) {
+                responseTexts.push(extraInfo);
+              }
+
+              context.text = responseTexts.join("\n\n");
+
+              return finish();
+            }
+          },
+          onmessage(msg) {
+            if (msg.data === "[DONE]" || context.finished) {
+              return finish();
+            }
+            const chunk = msg.data;
+            try {
+              const chunkJson = JSON.parse(
+                chunk,
+              ) as Anthropic.ChatStreamResponse;
+              const delta = chunkJson.completion;
+              if (delta) {
+                context.text += delta;
+                options.onUpdate?.(context.text, delta);
+              }
+            } catch (e) {
+              logger.error("[Request] parse error", chunk, msg);
+            }
+          },
+          onclose() {
+            finish();
+          },
+          onerror(e) {
+            options.onError?.(e);
+          },
+          openWhenHidden: true,
+        });
+      } catch (e) {
+        logger.error("failed to chat", e);
+        options.onError?.(e as Error);
+      }
+    },
+
+    async usage() {
+      return {
+        used: 0,
+        total: 0,
+      } as LLMUsage;
+    },
+
+    async models(): Promise<LLMModel[]> {
+      const customModels = anthropicConfig.customModels
+        .split(",")
+        .map((v) => v.trim())
+        .filter((v) => !!v)
+        .map((v) => ({
+          name: v,
+          available: true,
+        }));
+
+      return [...AnthropicConfig.provider.models.slice(), ...customModels];
+    },
+  };
+}
diff --git a/app/client/anthropic/types.ts b/app/client/anthropic/types.ts
new file mode 100644
index 00000000000..347693aa86f
--- /dev/null
+++ b/app/client/anthropic/types.ts
@@ -0,0 +1,24 @@
+export namespace Anthropic {
+  export interface ChatRequest {
+    model: string; // The model that will complete your prompt.
+    prompt: string; // The prompt that you want Claude to complete.
+    max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping.
+    stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
+    temperature?: number; // Amount of randomness injected into the response.
+    top_p?: number; // Use nucleus sampling.
+    top_k?: number; // Only sample from the top K options for each subsequent token.
+    metadata?: object; // An object describing metadata about the request.
+    stream?: boolean; // Whether to incrementally stream the response using server-sent events.
+  }
+
+  export interface ChatResponse {
+    completion: string;
+    stop_reason: "stop_sequence" | "max_tokens";
+    model: string;
+  }
+
+  export type ChatStreamResponse = ChatResponse & {
+    stop?: string;
+    log_id: string;
+  };
+}
diff --git a/app/client/common/auth.ts b/app/client/common/auth.ts
index 9533ebfd2d3..f7285238846 100644
--- a/app/client/common/auth.ts
+++ b/app/client/common/auth.ts
@@ -6,23 +6,22 @@ export function bearer(value: string) {
   return `Bearer ${value.trim()}`;
 }
 
-export function getAuthHeaders(apiKey = "") {
+export function getAuthKey(apiKey = "") {
   const accessStore = useAccessStore.getState();
   const isApp = !!getClientConfig()?.isApp;
-
-  let headers: Record<string, string> = {};
+  let authKey = "";
 
   if (apiKey) {
     // use user's api key first
-    headers.Authorization = bearer(apiKey);
+    authKey = bearer(apiKey);
   } else if (
     accessStore.enabledAccessControl() &&
     !isApp &&
     !!accessStore.accessCode
   ) {
     // or use access code
-    headers.Authorization = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode);
+    authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode);
   }
 
-  return headers;
+  return authKey;
 }
diff --git a/app/client/common/config.ts b/app/client/common/config.ts
deleted file mode 100644
index 127773a4c3b..00000000000
--- a/app/client/common/config.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-export const COMMON_PROVIDER_CONFIG = {
-  customModels: "",
-  models: [] as string[],
-  autoFetchModels: false, // fetch available models from server or not
-};
diff --git a/app/client/core.ts b/app/client/core.ts
index a75cf3fc067..8e7305f8619 100644
--- a/app/client/core.ts
+++ b/app/client/core.ts
@@ -2,9 +2,11 @@ import { MaskConfig, ProviderConfig } from "../store";
 import { shareToShareGPT } from "./common/share";
 import { createOpenAiClient } from "./openai";
 import { ChatControllerPool } from "./common/controller";
+import { createAnthropicClient } from "./anthropic";
 
 export const LLMClients = {
   openai: createOpenAiClient,
+  anthropic: createAnthropicClient,
 };
 
 export function createLLMClient(
diff --git a/app/client/openai/config.ts b/app/client/openai/config.ts
index b27534162e6..dda89c70629 100644
--- a/app/client/openai/config.ts
+++ b/app/client/openai/config.ts
@@ -1,5 +1,3 @@
-import { COMMON_PROVIDER_CONFIG } from "../common/config";
-
 export const OpenAIConfig = {
   model: {
     model: "gpt-3.5-turbo" as string,
@@ -12,9 +10,57 @@ export const OpenAIConfig = {
     frequency_penalty: 0,
   },
   provider: {
-    name: "OpenAI",
+    name: "OpenAI" as const,
     endpoint: "https://api.openai.com",
     apiKey: "",
-    ...COMMON_PROVIDER_CONFIG,
+    customModels: "",
+    autoFetchModels: false, // fetch available models from server or not
+
+    models: [
+      {
+        name: "gpt-4",
+        available: true,
+      },
+      {
+        name: "gpt-4-0314",
+        available: true,
+      },
+      {
+        name: "gpt-4-0613",
+        available: true,
+      },
+      {
+        name: "gpt-4-32k",
+        available: true,
+      },
+      {
+        name: "gpt-4-32k-0314",
+        available: true,
+      },
+      {
+        name: "gpt-4-32k-0613",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-0301",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-0613",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-16k",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-16k-0613",
+        available: true,
+      },
+    ],
   },
 };
diff --git a/app/client/openai/index.ts b/app/client/openai/index.ts
index a452936de97..827604b1ba2 100644
--- a/app/client/openai/index.ts
+++ b/app/client/openai/index.ts
@@ -3,12 +3,7 @@ import {
   fetchEventSource,
 } from "@fortaine/fetch-event-source";
 
-import {
-  API_PREFIX,
-  ApiPath,
-  DEFAULT_MODELS,
-  OpenaiPath,
-} from "@/app/constant";
+import { API_PREFIX, ApiPath, OpenaiPath } from "@/app/constant";
 import { ModelConfig, ProviderConfig } from "@/app/store";
 
 import { OpenAI } from "./types";
@@ -21,7 +16,8 @@ import { getApiPath } from "@/app/utils/path";
 import { trimEnd } from "@/app/utils/string";
 import { omit } from "@/app/utils/object";
 import { createLogger } from "@/app/utils/log";
-import { getAuthHeaders } from "../common/auth";
+import { getAuthKey } from "../common/auth";
+import { OpenAIConfig } from "./config";
 
 export function createOpenAiClient(
   providerConfigs: ProviderConfig,
@@ -35,12 +31,12 @@ export function createOpenAiClient(
     headers() {
       return {
         "Content-Type": "application/json",
-        ...getAuthHeaders(openaiConfig.apiKey),
+        Authorization: getAuthKey(),
       };
     },
 
     path(path: OpenaiPath): string {
-      let baseUrl = openaiConfig.endpoint;
+      let baseUrl: string = openaiConfig.endpoint;
 
       // if endpoint is empty, use default endpoint
       if (baseUrl.trim().length === 0) {
@@ -206,59 +202,9 @@ export function createOpenAiClient(
     },
 
     async usage() {
-      const formatDate = (d: Date) =>
-        `${d.getFullYear()}-${(d.getMonth() + 1)
-          .toString()
-          .padStart(2, "0")}-${d.getDate().toString().padStart(2, "0")}`;
-      const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
-      const now = new Date();
-      const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
-      const startDate = formatDate(startOfMonth);
-      const endDate = formatDate(new Date(Date.now() + ONE_DAY));
-
-      const [used, subs] = await Promise.all([
-        fetch(
-          `${this.path(
-            OpenaiPath.Usage,
-          )}?start_date=${startDate}&end_date=${endDate}`,
-          {
-            method: "GET",
-            headers: this.headers(),
-          },
-        ),
-        fetch(this.path(OpenaiPath.Subs), {
-          method: "GET",
-          headers: this.headers(),
-        }),
-      ]);
-
-      if (!used.ok || !subs.ok) {
-        throw new Error("Failed to query usage from openai");
-      }
-
-      const response = (await used.json()) as {
-        total_usage?: number;
-        error?: {
-          type: string;
-          message: string;
-        };
-      };
-
-      const total = (await subs.json()) as {
-        hard_limit_usd?: number;
-      };
-
-      if (response.error?.type) {
-        throw Error(response.error?.message);
-      }
-
-      response.total_usage = Math.round(response.total_usage ?? 0) / 100;
-      total.hard_limit_usd =
-        Math.round((total.hard_limit_usd ?? 0) * 100) / 100;
-
       return {
-        used: response.total_usage,
-        total: total.hard_limit_usd,
+        used: 0,
+        total: 0,
       } as LLMUsage;
     },
 
@@ -266,13 +212,14 @@
       const customModels = openaiConfig.customModels
         .split(",")
        .map((v) => v.trim())
+        .filter((v) => !!v)
         .map((v) => ({
           name: v,
           available: true,
         }));
 
       if (!openaiConfig.autoFetchModels) {
-        return [...DEFAULT_MODELS.slice(), ...customModels];
+        return [...OpenAIConfig.provider.models.slice(), ...customModels];
       }
 
       const res = await fetch(this.path(OpenaiPath.ListModel), {
diff --git a/app/client/types.ts b/app/client/types.ts
index 694059e1c36..24753869f69 100644
--- a/app/client/types.ts
+++ b/app/client/types.ts
@@ -1,5 +1,3 @@
-import { DEFAULT_MODELS } from "../constant";
-
 export interface LLMUsage {
   used: number;
   total: number;
@@ -14,8 +12,6 @@
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
 
-export type ChatModel = (typeof DEFAULT_MODELS)[number]["name"];
-
 export interface RequestMessage {
   role: MessageRole;
   content: string;
diff --git a/app/components/config/anthropic/model.tsx b/app/components/config/anthropic/model.tsx
new file mode 100644
index 00000000000..c5522595884
--- /dev/null
+++ b/app/components/config/anthropic/model.tsx
@@ -0,0 +1,79 @@
+import { ModelConfig } from "@/app/store";
+import { ModelConfigProps } from "../types";
+import { ListItem, Select } from "../../ui-lib";
+import Locale from "@/app/locales";
+import { InputRange } from "../../input-range";
+
+export function AnthropicModelConfig(
+  props: ModelConfigProps,
+) {
+  return (
+    <>
+      <ListItem title={Locale.Settings.Model}>
+        <Select
+          value={props.config.model}
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.model = e.currentTarget.value),
+            );
+          }}
+        >
+          {props.models.map((v, i) => (
+            <option value={v.name} key={i} disabled={!v.available}>
+              {v.name}
+            </option>
+          ))}
+        </Select>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.Temperature.Title}
+        subTitle={Locale.Settings.Temperature.SubTitle}
+      >
+        <InputRange
+          value={props.config.temperature?.toFixed(1)}
+          min="0"
+          max="1"
+          step="0.1"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.temperature = e.currentTarget.valueAsNumber),
+            );
+          }}
+        ></InputRange>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.TopP.Title}
+        subTitle={Locale.Settings.TopP.SubTitle}
+      >
+        <InputRange
+          value={(props.config.top_p ?? 1).toFixed(1)}
+          min="0"
+          max="1"
+          step="0.1"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.top_p = e.currentTarget.valueAsNumber),
+            );
+          }}
+        ></InputRange>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.MaxTokens.Title}
+        subTitle={Locale.Settings.MaxTokens.SubTitle}
+      >
+        <input
+          type="number"
+          value={props.config.max_tokens_to_sample}
+          onChange={(e) =>
+            props.updateConfig(
+              (config) =>
+                (config.max_tokens_to_sample = e.currentTarget.valueAsNumber),
+            )
+          }
+        ></input>
+      </ListItem>
+    </>
+  );
+}
diff --git a/app/components/config/anthropic/provider.tsx b/app/components/config/anthropic/provider.tsx
new file mode 100644
index 00000000000..f06fc71a267
--- /dev/null
+++ b/app/components/config/anthropic/provider.tsx
@@ -0,0 +1,70 @@
+import { ProviderConfig } from "@/app/store";
+import { ProviderConfigProps } from "../types";
+import { ListItem, PasswordInput } from "../../ui-lib";
+import Locale from "@/app/locales";
+import { REMOTE_API_HOST } from "@/app/constant";
+
+export function AnthropicProviderConfig(
+  props: ProviderConfigProps,
+) {
+  return (
+    <>
+      <ListItem
+        title={Locale.Settings.Endpoint.Title}
+        subTitle={Locale.Settings.Endpoint.SubTitle}
+      >
+        <input
+          type="text"
+          value={props.config.endpoint}
+          placeholder={REMOTE_API_HOST}
+          onChange={(e) =>
+            props.updateConfig(
+              (config) => (config.endpoint = e.currentTarget.value),
+            )
+          }
+        ></input>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.Token.Title}
+        subTitle={Locale.Settings.Token.SubTitle}
+      >
+        <PasswordInput
+          value={props.config.apiKey}
+          type="text"
+          placeholder={Locale.Settings.Token.Placeholder}
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.apiKey = e.currentTarget.value),
+            );
+          }}
+        />
+      </ListItem>
+      <ListItem title={Locale.Settings.Version ?? "API Version"}>
+        <PasswordInput
+          value={props.config.version}
+          type="text"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.version = e.currentTarget.value),
+            );
+          }}
+        />
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.CustomModel.Title}
+        subTitle={Locale.Settings.CustomModel.SubTitle}
+      >
+        <input
+          type="text"
+          value={props.config.customModels}
+          onChange={(e) =>
+            props.updateConfig(
+              (config) => (config.customModels = e.currentTarget.value),
+            )
+          }
+        ></input>
+      </ListItem>
+    </>
+  );
+}
diff --git a/app/components/config/index.tsx b/app/components/config/index.tsx
index b08fe06088f..22f9089f78d 100644
--- a/app/components/config/index.tsx
+++ b/app/components/config/index.tsx
@@ -11,6 +11,10 @@ import { OpenAIProviderConfig } from "./openai/provider";
 import { ListItem, Select } from "../ui-lib";
 import Locale from "@/app/locales";
 import { InputRange } from "../input-range";
+import { OpenAIConfig } from "@/app/client/openai/config";
+import { AnthropicModelConfig } from "./anthropic/model";
+import { AnthropicConfig } from "@/app/client/anthropic/config";
+import { AnthropicProviderConfig } from "./anthropic/provider";
 
 export function ModelConfigList(props: {
   provider: LLMProvider;
@@ -24,16 +28,17 @@ export function ModelConfigList(props: {
         updateConfig={(update) => {
           props.updateConfig((config) => update(config.openai));
         }}
-        models={[
-          {
-            name: "gpt-3.5-turbo",
-            available: true,
-          },
-          {
-            name: "gpt-4",
-            available: true,
-          },
-        ]}
+        models={OpenAIConfig.provider.models}
+      />
+    );
+  } else if (props.provider === "anthropic") {
+    return (
+      <AnthropicModelConfig
+        config={props.config.anthropic}
+        updateConfig={(update) => {
+          props.updateConfig((config) => update(config.anthropic));
+        }}
+        models={AnthropicConfig.provider.models}
       />
     );
   }
@@ -55,6 +60,15 @@ export function ProviderConfigList(props: {
         }}
       />
     );
+  } else if (props.provider === "anthropic") {
+    return (
+      <AnthropicProviderConfig
+        config={props.config.anthropic}
+        updateConfig={(update) => {
+          props.updateConfig((config) => update(config.anthropic));
+        }}
+      />
+    );
+  }
 
   return null;
diff --git a/app/components/config/openai/provider.tsx b/app/components/config/openai/provider.tsx
index b905b130dfa..b3479e986ea 100644
--- a/app/components/config/openai/provider.tsx
+++ b/app/components/config/openai/provider.tsx
@@ -3,6 +3,8 @@ import { ProviderConfigProps } from "../types";
 import { ListItem, PasswordInput } from "../../ui-lib";
 import Locale from "@/app/locales";
 import { REMOTE_API_HOST } from "@/app/constant";
+import { IconButton } from "../../button";
+import ReloadIcon from "@/app/icons/reload.svg";
 
 export function OpenAIProviderConfig(
   props: ProviderConfigProps,
@@ -58,6 +60,7 @@ export function OpenAIProviderConfig(
+        <IconButton icon={<ReloadIcon />} />
           onChange={(e) =>
             props.updateConfig(
diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx
deleted file mode 100644
index 00734382cf8..00000000000
--- a/app/components/model-config.tsx
+++ /dev/null
@@ -1,139 +0,0 @@
-import { ModalConfigValidator, ModelConfig, useAppConfig } from "../store";
-
-import Locale from "../locales";
-import { InputRange } from "./input-range";
-import { ListItem, Select } from "./ui-lib";
-
-export function _ModelConfigList(props: {
-  modelConfig: ModelConfig;
-  updateConfig: (updater: (config: ModelConfig) => void) => void;
-}) {
-  return null;
-  /*
-  const config = useAppConfig();
-
-  return (
-    <>
-      <ListItem
-        title={Locale.Settings.Temperature.Title}
-        subTitle={Locale.Settings.Temperature.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.temperature?.toFixed(1)}
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.temperature = ModalConfigValidator.temperature(
-                  e.currentTarget.valueAsNumber,
-                )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.TopP.Title}
-        subTitle={Locale.Settings.TopP.SubTitle}
-      >
-        <InputRange
-          value={(props.modelConfig.top_p ?? 1).toFixed(1)}
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.top_p = ModalConfigValidator.top_p(
-                  e.currentTarget.valueAsNumber,
-                )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.MaxTokens.Title}
-        subTitle={Locale.Settings.MaxTokens.SubTitle}
-      >
-        <input
-          type="number"
-          value={props.modelConfig.max_tokens}
-          onChange={(e) =>
-            props.updateConfig(
-              (config) =>
-                (config.max_tokens = ModalConfigValidator.max_tokens(
-                  e.currentTarget.valueAsNumber,
-                )),
-            )
-          }
-        ></input>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.PresencePenalty.Title}
-        subTitle={Locale.Settings.PresencePenalty.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.presence_penalty?.toFixed(1)}
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.presence_penalty =
-                  ModalConfigValidator.presence_penalty(
-                    e.currentTarget.valueAsNumber,
-                  )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.FrequencyPenalty.Title}
-        subTitle={Locale.Settings.FrequencyPenalty.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.frequency_penalty?.toFixed(1)}
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.frequency_penalty =
-                  ModalConfigValidator.frequency_penalty(
-                    e.currentTarget.valueAsNumber,
-                  )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-    </>
-  );
-  */
-}
diff --git a/app/components/settings.tsx b/app/components/settings.tsx
index ffe3850f098..6eca784ac59 100644
--- a/app/components/settings.tsx
+++ b/app/components/settings.tsx
@@ -37,8 +37,6 @@ import {
   useUpdateStore,
   useAccessStore,
   useAppConfig,
-  LLMProvider,
-  LLMProviders,
 } from "../store";
 
 import Locale, {
@@ -578,22 +576,6 @@
     console.log("[Update] remote version ", updateStore.remoteVersion);
   }
 
-  const usage = {
-    used: updateStore.used,
-    subscription: updateStore.subscription,
-  };
-  const [loadingUsage, setLoadingUsage] = useState(false);
-  function checkUsage(force = false) {
-    if (accessStore.hideBalanceQuery) {
-      return;
-    }
-
-    setLoadingUsage(true);
-    updateStore.updateUsage(force).finally(() => {
-      setLoadingUsage(false);
-    });
-  }
-
   const accessStore = useAccessStore();
   const enabledAccessControl = useMemo(
     () => accessStore.enabledAccessControl(),
@@ -610,7 +592,6 @@
   useEffect(() => {
     // checks per minutes
     checkUpdate();
-    showUsage && checkUsage();
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);
 
@@ -806,6 +787,28 @@
+        <List>
+          {showAccessCode ? (
+            <ListItem
+              title={Locale.Settings.AccessCode.Title}
+              subTitle={Locale.Settings.AccessCode.SubTitle}
+            >
+              <PasswordInput
+                value={accessStore.accessCode}
+                type="text"
+                placeholder={Locale.Settings.AccessCode.Placeholder}
+                onChange={(e) => {
+                  accessStore.update(
+                    (config) => (config.accessCode = e.currentTarget.value),
+                  );
+                }}
+              />
+            </ListItem>
+          ) : (
+            <></>
+          )}
+        </List>
+
@@ -875,56 +878,6 @@
-        <List>
-          {showAccessCode ? (
-            <ListItem
-              title={Locale.Settings.AccessCode.Title}
-              subTitle={Locale.Settings.AccessCode.SubTitle}
-            >
-              <PasswordInput
-                value={accessStore.accessCode}
-                type="text"
-                placeholder={Locale.Settings.AccessCode.Placeholder}
-                onChange={(e) => {
-                  accessStore.update(
-                    (config) => (config.accessCode = e.currentTarget.value),
-                  );
-                }}
-              />
-            </ListItem>
-          ) : (
-            <></>
-          )}
-
-          {!accessStore.hideUserApiKey ? <></> : null}
-
-          {!accessStore.hideBalanceQuery ? (
-            <ListItem
-              title={Locale.Settings.Usage.Title}
-              subTitle={
-                showUsage
-                  ? loadingUsage
-                    ? Locale.Settings.Usage.IsChecking
-                    : Locale.Settings.Usage.SubTitle(
-                        usage?.used ?? "[?]",
-                        usage?.subscription ?? "[?]",
-                      )
-                  : Locale.Settings.Usage.NoAccess
-              }
-            >
-              {!showUsage || loadingUsage ? (
-                <div />
-              ) : (
-                <IconButton
-                  icon={<ResetIcon></ResetIcon>}
-                  text={Locale.Settings.Usage.Check}
-                  onClick={() => checkUsage(true)}
-                />
-              )}
-            </ListItem>
-          ) : null}
-        </List>
-
diff --git a/app/store/access.ts b/app/store/access.ts
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -48,9 +48,7 @@
       .then((res) => res.json())
       .then((res) => {
         set(() => ({ ...res }));
 
         if (res.disableGPT4) {
-          DEFAULT_MODELS.forEach(
-            (m: any) => (m.available = !m.name.startsWith("gpt-4")),
-          );
+          // disable model
         }
       })
       .catch(() => {
diff --git a/app/store/chat.ts b/app/store/chat.ts
index 2a66a359b4c..1dff506d91d 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -2,20 +2,9 @@ import { trimTopic } from "../utils";
 
 import Locale, { getLang } from "../locales";
 import { showToast } from "../components/ui-lib";
-import {
-  LLMProvider,
-  MaskConfig,
-  ModelConfig,
-  ModelType,
-  useAppConfig,
-} from "./config";
+import { MaskConfig, useAppConfig } from "./config";
 import { createEmptyMask, Mask } from "./mask";
-import {
-  DEFAULT_INPUT_TEMPLATE,
-  DEFAULT_SYSTEM_TEMPLATE,
-  StoreKey,
-  SUMMARIZE_MODEL,
-} from "../constant";
+import { DEFAULT_INPUT_TEMPLATE, StoreKey } from "../constant";
 import { ChatControllerPool } from "../client/common/controller";
 import { prettyObject } from "../utils/format";
 import { estimateTokenLength } from "../utils/token";
@@ -85,11 +74,6 @@
   };
 }
 
-function getSummarizeModel(currentModel: string) {
-  // if it is using gpt-* models, force to use 3.5 to summarize
-  return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
-}
-
 function countMessages(msgs: ChatMessage[]) {
   return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
 }
@@ -291,6 +275,18 @@
       return this.extractModelConfig(maskConfig);
     },
 
+    getMaxTokens() {
+      const maskConfig = this.getCurrentMaskConfig();
+
+      if (maskConfig.provider === "openai") {
+        return maskConfig.modelConfig.openai.max_tokens;
+      } else if (maskConfig.provider === "anthropic") {
+        return maskConfig.modelConfig.anthropic.max_tokens_to_sample;
+      }
+
+      return 8192;
+    },
+
     getClient() {
       const appConfig = useAppConfig.getState();
       const currentMaskConfig = get().getCurrentMaskConfig();
@@ -463,7 +459,7 @@
         : shortTermMemoryStartIndex;
       // and if user has cleared history messages, we should exclude the memory too.
       const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
-      const maxTokenThreshold = modelConfig.max_tokens;
+      const maxTokenThreshold = this.getMaxTokens();
 
       // get recent messages as much as possible
       const reversedRecentMessages = [];
@@ -546,7 +542,6 @@
         });
       }
 
-      const modelConfig = this.getCurrentModelConfig();
       const summarizeIndex = Math.max(
         session.lastSummarizeIndex,
         session.clearContextIndex ?? 0,
@@ -557,7 +552,7 @@
 
       const historyMsgLength = countMessages(toBeSummarizedMsgs);
 
-      if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
+      if (historyMsgLength > this.getMaxTokens()) {
         const n = toBeSummarizedMsgs.length;
         toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
           Math.max(0, n - chatConfig.historyMessageCount),
diff --git a/app/store/config.ts b/app/store/config.ts
index 6f388a8b130..1fb6d6878be 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -2,7 +2,6 @@ import { isMacOS } from "../utils";
 import { getClientConfig } from "../config/client";
 import {
   DEFAULT_INPUT_TEMPLATE,
-  DEFAULT_MODELS,
   DEFAULT_SIDEBAR_WIDTH,
   StoreKey,
 } from "../constant";
@@ -10,8 +9,7 @@
 import { createPersistStore } from "../utils/store";
 import { OpenAIConfig } from "../client/openai/config";
 import { api } from "../client";
 import { SubmitKey, Theme } from "../typing";
-
-export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
+import { AnthropicConfig } from "../client/anthropic/config";
 
 export const DEFAULT_CHAT_CONFIG = {
   enableAutoGenerateTitle: true,
@@ -25,17 +23,13 @@
 export type ChatConfig = typeof DEFAULT_CHAT_CONFIG;
 
 export const DEFAULT_PROVIDER_CONFIG = {
   openai: OpenAIConfig.provider,
+  anthropic: AnthropicConfig.provider,
   // azure: {
   //   endpoint: "https://api.openai.com",
   //   apiKey: "",
   //   version: "",
   //   ...COMMON_PROVIDER_CONFIG,
   // },
-  // claude: {
-  //   endpoint: "https://api.anthropic.com",
-  //   apiKey: "",
-  //   ...COMMON_PROVIDER_CONFIG,
-  // },
   // google: {
   //   endpoint: "https://api.anthropic.com",
   //   apiKey: "",
@@ -45,6 +39,7 @@
 export const DEFAULT_MODEL_CONFIG = {
   openai: OpenAIConfig.model,
+  anthropic: AnthropicConfig.model,
   // azure: {
   //   model: "gpt-3.5-turbo" as string,
   //   summarizeModel: "gpt-3.5-turbo",
   //
   //   temperature: 0.5,
   //   top_p: 1,
   //   max_tokens: 4000,
   //   presence_penalty: 0,
   //   frequency_penalty: 0,
   // },
-  // claude: {
-  //   model: "claude-2",
-  //   summarizeModel: "claude-2",
-  //
-  //   max_tokens_to_sample: 100000,
-  //   temperature: 1,
-  //   top_p: 0.7,
-  //   top_k: 1,
-  // },
   // google: {
   //   model: "chat-bison-001",
   //   summarizeModel: "claude-2",
@@ -125,7 +111,7 @@
 export const ModalConfigValidator = {
   model(x: string) {
-    return x as ModelType;
+    return x as string;
   },
   max_tokens(x: number) {
     return limitNumber(x, 0, 100000, 2000);
diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index e530203f680..666d81be7ec 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -9,7 +9,7 @@
   },
   "package": {
     "productName": "ChatGPT Next Web",
-    "version": "2.9.9"
+    "version": "3.0.0"
   },
   "tauri": {
     "allowlist": {
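
Note on the Claude prompt format: the new client's `beforeRequest()` in app/client/anthropic/index.ts flattens the chat history into Claude's text-completion prompt, folding the `system` role into `Human` because the v1/v2 completion API has no system role. Below is a minimal standalone sketch of that flattening (illustration only, not part of the patch; the helper name `buildClaudePrompt` is hypothetical). The trailing `\n\nAssistant:` cue is appended here because Anthropic's completion endpoint generally expects prompts to end with it; the patch itself sends only the joined turns.

// Illustration only — not part of the patch.
type MessageRole = "system" | "user" | "assistant";

interface RequestMessage {
  role: MessageRole;
  content: string;
}

// Mirrors the ClaudeMapper in the patch: "system" is folded into "Human".
const ClaudeMapper: Record<MessageRole, string> = {
  assistant: "Assistant",
  user: "Human",
  system: "Human",
};

function buildClaudePrompt(messages: RequestMessage[]): string {
  const turns = messages
    .map((m) => `\n\n${ClaudeMapper[m.role] ?? "Human"}: ${m.content}`)
    .join("");
  // Assumption: the caller wants the standard trailing Assistant cue;
  // the patch leaves the prompt as the raw joined turns.
  return `${turns}\n\nAssistant:`;
}

console.log(
  buildClaudePrompt([
    { role: "system", content: "Answer briefly." },
    { role: "user", content: "What is an LLM?" },
  ]),
);
// "\n\nHuman: Answer briefly.\n\nHuman: What is an LLM?\n\nAssistant:"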
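
On `getMaxTokens()` in app/store/chat.ts: each provider names its token-budget field differently (`max_tokens` for OpenAI, `max_tokens_to_sample` for Anthropic), so the store now branches on the provider instead of reading one shared field. This also retires the removed line `historyMsgLength > modelConfig?.max_tokens ?? 4000`, where `??` bound more loosely than `>`, making the `4000` fallback dead code. A standalone sketch of the dispatch, with simplified shapes (not part of the patch):

// Illustration only — simplified shapes, real ones live in app/store/config.ts.
type Provider = "openai" | "anthropic";

interface ModelConfigByProvider {
  openai: { max_tokens: number };
  anthropic: { max_tokens_to_sample: number };
}

function getMaxTokens(
  provider: Provider,
  modelConfig: ModelConfigByProvider,
): number {
  if (provider === "openai") return modelConfig.openai.max_tokens;
  if (provider === "anthropic") {
    return modelConfig.anthropic.max_tokens_to_sample;
  }
  return 8192; // the patch's fallback for unknown providers
}

console.log(
  getMaxTokens("anthropic", {
    openai: { max_tokens: 4000 },
    anthropic: { max_tokens_to_sample: 8192 },
  }),
); // 8192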