Skip to content

Commit

Permalink
Merge pull request Chanzhaoyu#374 from assassinliujie/main
Browse files Browse the repository at this point in the history
fix the max tokens for gpt-4-turbo 128K
  • Loading branch information
Kerwin1202 authored Nov 23, 2023
2 parents b4f138d + ea1369c commit d372aa0
Showing 1 changed file with 6 additions and 1 deletion.
7 changes: 6 additions & 1 deletion service/src/chatgpt/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,12 @@ export async function initApi(key: KeyConfig, chatModel: string) {
// Set the token limits based on the model's type. This is because different models have different token limits.
// The token limit includes the token count from both the message array sent and the model response.
// 'gpt-35-turbo' has a limit of 4096 tokens, 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.

// Check if the model type is GPT-4-turbo
if (model.toLowerCase().includes('1106-preview')) {
//If it's a '1106-preview' model, set the maxModelTokens to 131072
options.maxModelTokens = 131072
options.maxResponseTokens = 32768
}
// Check if the model type includes '16k'
if (model.toLowerCase().includes('16k')) {
// If it's a '16k' model, set the maxModelTokens to 16384 and maxResponseTokens to 4096
Expand Down

0 comments on commit d372aa0

Please sign in to comment.