From 9a01d1b876ac7ed152a98d6c5d56ec6f5c230bbc Mon Sep 17 00:00:00 2001
From: dashi6174
Date: Mon, 20 May 2024 17:25:19 +0800
Subject: [PATCH] The default max_tokens of 215 is too small and answers are
 often cut off; raise it to 512 to address this issue. (#845)

### What problem does this PR solve?

The default `max_tokens` of 215 frequently cuts answers off mid-reply. This PR raises the default to 512 in both the backend `Dialog` model and the frontend model-variable presets.

### Type of change

- [x] Refactoring

---
 api/db/db_models.py            | 2 +-
 web/src/constants/knowledge.ts | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/api/db/db_models.py b/api/db/db_models.py
index ecd97b2c64..1357170302 100644
--- a/api/db/db_models.py
+++ b/api/db/db_models.py
@@ -759,7 +759,7 @@ class Dialog(DataBaseModel):
                              help_text="English|Chinese")
     llm_id = CharField(max_length=128, null=False, help_text="default llm ID")
     llm_setting = JSONField(null=False, default={"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
-                                                 "presence_penalty": 0.4, "max_tokens": 215})
+                                                 "presence_penalty": 0.4, "max_tokens": 512})
     prompt_type = CharField(
         max_length=16,
         null=False,
diff --git a/web/src/constants/knowledge.ts b/web/src/constants/knowledge.ts
index 8413560ea5..217f78b54a 100644
--- a/web/src/constants/knowledge.ts
+++ b/web/src/constants/knowledge.ts
@@ -31,14 +31,14 @@ export const settledModelVariableMap = {
     top_p: 0.3,
     frequency_penalty: 0.7,
     presence_penalty: 0.4,
-    max_tokens: 215,
+    max_tokens: 512,
   },
   [ModelVariableType.Balance]: {
     temperature: 0.5,
     top_p: 0.5,
     frequency_penalty: 0.7,
     presence_penalty: 0.4,
-    max_tokens: 215,
+    max_tokens: 512,
   },
 };
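
A note for reviewers: below is a minimal sketch of how the `llm_setting` defaults touched by this patch typically reach a completion request. It assumes a generic OpenAI-style client and a placeholder model name; it is not RAGFlow's actual dispatch code. The point is that `max_tokens` caps the length of the generated answer, which is why a default of 215 tended to truncate replies.

```python
# Illustrative sketch only (assumed OpenAI-style client, not RAGFlow's
# real call path): the Dialog.llm_setting defaults map one-to-one onto
# standard chat-completion sampling parameters.
from openai import OpenAI

llm_setting = {"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
               "presence_penalty": 0.4, "max_tokens": 512}  # was 215

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
resp = client.chat.completions.create(
    model="gpt-4o-mini",  # model name is a placeholder
    messages=[{"role": "user", "content": "Answer from the retrieved chunks."}],
    **llm_setting,
)
print(resp.choices[0].message.content)
# A finish_reason of "length" means the max_tokens cap cut the answer
# off: the failure mode this patch mitigates.
print(resp.choices[0].finish_reason)
```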