From 3712b0cd1293b3224057510139efae370d0dd421 Mon Sep 17 00:00:00 2001
From: wangjian
Date: Thu, 13 Jun 2024 13:58:28 +0800
Subject: [PATCH 1/4] modify onlineChatModule prompter setting

---
 .../module/onlineChatModule/doubaoModule.py   |  2 --
 lazyllm/module/onlineChatModule/glmModule.py  |  2 --
 .../onlineChatModule/moonshotaiModule.py      |  5 +----
 .../onlineChatModule/onlineChatModuleBase.py  | 18 +++++++++-------
 .../module/onlineChatModule/openaiModule.py   |  2 --
 lazyllm/module/onlineChatModule/qwenModule.py |  2 --
 .../onlineChatModule/sensenovaModule.py       | 21 ++++++++++++-------
 7 files changed, 24 insertions(+), 28 deletions(-)

diff --git a/lazyllm/module/onlineChatModule/doubaoModule.py b/lazyllm/module/onlineChatModule/doubaoModule.py
index 35b24603..ec65d5ce 100644
--- a/lazyllm/module/onlineChatModule/doubaoModule.py
+++ b/lazyllm/module/onlineChatModule/doubaoModule.py
@@ -8,14 +8,12 @@ class DoubaoModule(OnlineChatModuleBase):
     def __init__(self,
                  model: str,
                  base_url: str = "https://ark.cn-beijing.volces.com/api/v3",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False):
         super().__init__(model_type=self.__class__.__name__,
                          api_key=lazyllm.config['doubao_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=[],
                          return_trace=return_trace)
diff --git a/lazyllm/module/onlineChatModule/glmModule.py b/lazyllm/module/onlineChatModule/glmModule.py
index 42514fda..6e3ae4dc 100644
--- a/lazyllm/module/onlineChatModule/glmModule.py
+++ b/lazyllm/module/onlineChatModule/glmModule.py
@@ -12,7 +12,6 @@ class GLMModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://open.bigmodel.cn/api/paas/v4",
                  model: str = "glm-4",
-                 system_prompt: str = "你是一个乐于解答各种问题的助手,你的任务是为用户提供专业、准确、有见地的建议。",
                  stream: str = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -22,7 +21,6 @@ def __init__(self,
                          base_url=base_url,
                          model_name=model,
                          stream=stream,
-                         system_prompt=system_prompt,
                          trainable_models=GLMModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
diff --git a/lazyllm/module/onlineChatModule/moonshotaiModule.py b/lazyllm/module/onlineChatModule/moonshotaiModule.py
index 75ebf4c0..3c9bf1c9 100644
--- a/lazyllm/module/onlineChatModule/moonshotaiModule.py
+++ b/lazyllm/module/onlineChatModule/moonshotaiModule.py
@@ -7,9 +7,6 @@ class MoonshotAIModule(OnlineChatModuleBase):
     def __init__(self,
                  base_url="https://api.moonshot.cn",
                  model="moonshot-v1-8k",
-                 system_prompt="你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。\
-                     你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,\
-                     黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。",
                  stream=True,
                  return_trace=False,
                  **kwargs):
@@ -18,11 +15,11 @@ def __init__(self,
                          api_key=lazyllm.config['moonshotai_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=[],
                          return_trace=return_trace,
                          **kwargs)
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'v1/chat/completions')
diff --git a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
index 0ba3b60b..733c8284 100644
--- a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
+++ b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
@@ -14,11 +14,9 @@ def __init__(self,
                  api_key: str,
                  base_url: str,
                  model_name: str,
-                 system_prompt: str,
                  stream: bool,
                  trainable_models: List[str],
                  return_trace: bool = False,
-                 prompter: PrompterBase = None,
                  **kwargs):
         super().__init__(return_trace=return_trace)
         self._model_type = model_type
@@ -27,19 +25,23 @@ def __init__(self,
         self._api_key = api_key
         self._base_url = base_url
         self._model_name = model_name
-        self.system_prompt(prompt=system_prompt)
         self._stream = stream
         self.trainable_mobels = trainable_models
         self._set_headers()
         self._set_chat_url()
-        self._prompt = prompter if prompter else ChatPrompter()
+        self.prompt()
         self._is_trained = False

-    def system_prompt(self, prompt: str = ""):
-        if len(prompt) > 0:
-            self._system_prompt = {"role": "system", "content": prompt}
+    def prompt(self, prompt = None):
+        if prompt is None:
+            self._prompt = ChatPrompter()
+        elif isinstance(prompt, PrompterBase):
+            self._prompt = prompt
+        elif isinstatnce(prompt, str):
+            self._prompt = ChatPrompter(prompt)
         else:
-            self._system_prompt = {"role": "system", "content": "You are a helpful assistant."}
+            raise TypeError(f"{prmpt} type is not supported.")
+        return self

     def _set_headers(self):
         self._headers = {
diff --git a/lazyllm/module/onlineChatModule/openaiModule.py b/lazyllm/module/onlineChatModule/openaiModule.py
index f7c0bf5d..0bc64cc5 100644
--- a/lazyllm/module/onlineChatModule/openaiModule.py
+++ b/lazyllm/module/onlineChatModule/openaiModule.py
@@ -14,7 +14,6 @@ class OpenAIModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://api.openai.com/v1",
                  model: str = "gpt-3.5-turbo",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -23,7 +22,6 @@ def __init__(self,
                          api_key=lazyllm.config['openai_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=OpenAIModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
diff --git a/lazyllm/module/onlineChatModule/qwenModule.py b/lazyllm/module/onlineChatModule/qwenModule.py
index def52410..5a65c7e7 100644
--- a/lazyllm/module/onlineChatModule/qwenModule.py
+++ b/lazyllm/module/onlineChatModule/qwenModule.py
@@ -17,7 +17,6 @@ class QwenModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://dashscope.aliyuncs.com",
                  model: str = "qwen-plus",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -26,7 +25,6 @@ def __init__(self,
                          api_key=lazyllm.config['qwen_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=QwenModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
diff --git a/lazyllm/module/onlineChatModule/sensenovaModule.py b/lazyllm/module/onlineChatModule/sensenovaModule.py
index ab1f430e..da9691ce 100644
--- a/lazyllm/module/onlineChatModule/sensenovaModule.py
+++ b/lazyllm/module/onlineChatModule/sensenovaModule.py
@@ -13,7 +13,6 @@ class SenseNovaModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url="https://api.sensenova.cn/v1/llm",
                  model="SenseChat-5",
-                 system_prompt="You are an AI assistant whose name is InternLM (书生·浦语).",
                  stream=True,
                  return_trace=False,
                  **kwargs):
@@ -24,7 +23,6 @@ def __init__(self,
                          base_url=base_url,
                          model_name=model,
                          stream=stream,
-                         system_prompt=system_prompt,
                          trainable_models=SenseNovaModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
@@ -63,12 +61,19 @@ def _parse_response_stream(self, response: str) -> str:
         return chunk

     def _parse_response_non_stream(self, response: str) -> str:
-        cur_msg = json.loads(response)['data']["choices"][0]
-        content = cur_msg.get("message", "")
-        msg = {"role": cur_msg["role"], "content": content}
cur_msg["role"], "content": content} - cur_msg.pop("role") - cur_msg['message'] = msg - return cur_msg + try: + resp = json.loads(response)['data'] + content = resp["choices"][0].get("message", "") + msg = {"role": resp['choices'][0].pop("role"), "content": content} + resp['choices'][0]['message'] = msg + if 'tool_calls' in resp['choices'][0]: + tool_calls = resp['choices'][0].pop("tool_calls") + resp['choices'][0]['message']['tool_calls'] = tool_calls + resp['model'] = self._model_name + return resp["choices"][0] + except Exception as e: + lazyllm.LOG.error(e) + return "" def _convert_file_format(self, filepath: str) -> None: with open(filepath, 'r', encoding='utf-8') as fr: From 906c621422fdbd033089cdd201c8ccabf3e532a4 Mon Sep 17 00:00:00 2001 From: wangjian Date: Thu, 13 Jun 2024 17:50:51 +0800 Subject: [PATCH 2/4] modify the onlineChatModule system_prompt setting --- lazyllm/module/onlineChatModule/doubaoModule.py | 3 +++ lazyllm/module/onlineChatModule/glmModule.py | 3 +++ lazyllm/module/onlineChatModule/moonshotaiModule.py | 5 +++++ lazyllm/module/onlineChatModule/onlineChatModuleBase.py | 8 ++++++-- lazyllm/module/onlineChatModule/openaiModule.py | 3 +++ lazyllm/module/onlineChatModule/qwenModule.py | 3 +++ lazyllm/module/onlineChatModule/sensenovaModule.py | 3 +++ 7 files changed, 26 insertions(+), 2 deletions(-) diff --git a/lazyllm/module/onlineChatModule/doubaoModule.py b/lazyllm/module/onlineChatModule/doubaoModule.py index ec65d5ce..b3d9f68a 100644 --- a/lazyllm/module/onlineChatModule/doubaoModule.py +++ b/lazyllm/module/onlineChatModule/doubaoModule.py @@ -18,6 +18,9 @@ def __init__(self, trainable_models=[], return_trace=return_trace) + def _get_system_prompt(self): + return "你是人工智能助手豆包。你的任务是针对用户的问题和要求提供适当的答复和支持。" + def _set_chat_url(self): self._url = os.path.join(self._base_url, 'chat/completions') diff --git a/lazyllm/module/onlineChatModule/glmModule.py b/lazyllm/module/onlineChatModule/glmModule.py index 6e3ae4dc..7ef25d5e 100644 --- a/lazyllm/module/onlineChatModule/glmModule.py +++ b/lazyllm/module/onlineChatModule/glmModule.py @@ -26,6 +26,9 @@ def __init__(self, **kwargs) FileHandlerBase.__init__(self) + def _get_system_prompt(self): + return "你是人工智能助手智谱清言(ChatGLM),是基于智谱 AI 公司于2023训练的语言模型开发的。你的任务是针对用户的问题和要求提供适当的答复和支持。" + def _get_models_list(self): return ["glm-4", "glm-4v", "glm-3-turbo", "chatglm-turbo", "cogview-3", "embedding-2", "text-embedding"] diff --git a/lazyllm/module/onlineChatModule/moonshotaiModule.py b/lazyllm/module/onlineChatModule/moonshotaiModule.py index 3c9bf1c9..80fceb18 100644 --- a/lazyllm/module/onlineChatModule/moonshotaiModule.py +++ b/lazyllm/module/onlineChatModule/moonshotaiModule.py @@ -20,6 +20,11 @@ def __init__(self, return_trace=return_trace, **kwargs) + def _get_system_prompt(self): + return "你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。\ + 你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,\ + 黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。" + def _set_chat_url(self): self._url = os.path.join(self._base_url, 'v1/chat/completions') diff --git a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py index 733c8284..35732016 100644 --- a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py +++ b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py @@ -37,12 +37,16 @@ def prompt(self, prompt = None): self._prompt = ChatPrompter() elif isinstance(prompt, PrompterBase): self._prompt = prompt - elif isinstatnce(prompt, str): + elif isinstance(prompt, str): self._prompt = 
             self._prompt = ChatPrompter(prompt)
         else:
-            raise TypeError(f"{prmpt} type is not supported.")
+            raise TypeError(f"{prompt} type is not supported.")
+        self._prompt._set_model_configs(system=self._get_system_prompt())
         return self

+    def _get_system_prompt(self):
+        raise NotImplementedError("_get_system_prompt is not implemented.")
+
     def _set_headers(self):
         self._headers = {
             'Content-Type': 'application/json',
diff --git a/lazyllm/module/onlineChatModule/openaiModule.py b/lazyllm/module/onlineChatModule/openaiModule.py
index 0bc64cc5..9ee80f7f 100644
--- a/lazyllm/module/onlineChatModule/openaiModule.py
+++ b/lazyllm/module/onlineChatModule/openaiModule.py
@@ -28,6 +28,9 @@ def __init__(self,
                          **kwargs)
         FileHandlerBase.__init__(self)

+    def _get_system_prompt(self):
+        return "You are ChatGPT, a large language model trained by OpenAI. You are a helpful assistant."
+
     def _convert_file_format(self, filepath: str) -> str:
         with open(filepath, 'r', encoding='utf-8') as fr:
             dataset = [json.loads(line) for line in fr]
diff --git a/lazyllm/module/onlineChatModule/qwenModule.py b/lazyllm/module/onlineChatModule/qwenModule.py
index 5a65c7e7..9d07b82f 100644
--- a/lazyllm/module/onlineChatModule/qwenModule.py
+++ b/lazyllm/module/onlineChatModule/qwenModule.py
@@ -32,6 +32,9 @@ def __init__(self,
         FileHandlerBase.__init__(self)
         self._deploy_paramters = None

+    def _get_system_prompt(self):
+        return "你是来自阿里云的大规模语言模型,你叫通义千问,你是一个有用的助手。"
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'compatible-mode/v1/chat/completions')
diff --git a/lazyllm/module/onlineChatModule/sensenovaModule.py b/lazyllm/module/onlineChatModule/sensenovaModule.py
index da9691ce..486c624a 100644
--- a/lazyllm/module/onlineChatModule/sensenovaModule.py
+++ b/lazyllm/module/onlineChatModule/sensenovaModule.py
@@ -29,6 +29,9 @@ def __init__(self,
         FileHandlerBase.__init__(self)
         self._deploy_paramters = None

+    def _get_system_prompt(self):
+        return "You are an AI assistant whose name is InternLM (书生·浦语), develped by SenseTime."
+
     @staticmethod
     def encode_jwt_token(ak: str, sk: str) -> str:
         headers = {

From 17cc923d767f763989732c5d3ff59cdca0f689e5 Mon Sep 17 00:00:00 2001
From: wangjian
Date: Thu, 13 Jun 2024 18:08:59 +0800
Subject: [PATCH 3/4] modify sensenova system prompt

---
 lazyllm/module/onlineChatModule/sensenovaModule.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lazyllm/module/onlineChatModule/sensenovaModule.py b/lazyllm/module/onlineChatModule/sensenovaModule.py
index 486c624a..38342d88 100644
--- a/lazyllm/module/onlineChatModule/sensenovaModule.py
+++ b/lazyllm/module/onlineChatModule/sensenovaModule.py
@@ -30,7 +30,7 @@ def __init__(self,
         self._deploy_paramters = None

     def _get_system_prompt(self):
-        return "You are an AI assistant whose name is InternLM (书生·浦语), develped by SenseTime."
+        return "You are an AI assistant, developed by SenseTime and released in 2023."

     @staticmethod
     def encode_jwt_token(ak: str, sk: str) -> str:
         headers = {

From b329ec06e001f37fbc34856c743b35f07b6f07a4 Mon Sep 17 00:00:00 2001
From: wangjian
Date: Thu, 13 Jun 2024 18:18:25 +0800
Subject: [PATCH 4/4] modify the python style

---
 lazyllm/module/onlineChatModule/moonshotaiModule.py     | 1 -
 lazyllm/module/onlineChatModule/onlineChatModuleBase.py | 4 ++--
 lazyllm/module/onlineChatModule/sensenovaModule.py      | 4 ++--
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/lazyllm/module/onlineChatModule/moonshotaiModule.py b/lazyllm/module/onlineChatModule/moonshotaiModule.py
index 80fceb18..fe70901e 100644
--- a/lazyllm/module/onlineChatModule/moonshotaiModule.py
+++ b/lazyllm/module/onlineChatModule/moonshotaiModule.py
@@ -25,6 +25,5 @@ def _get_system_prompt(self):
             你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,\
             黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。"

-
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'v1/chat/completions')
diff --git a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
index 35732016..e6bfb614 100644
--- a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
+++ b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
@@ -29,10 +29,10 @@ def __init__(self,
         self.trainable_mobels = trainable_models
         self._set_headers()
         self._set_chat_url()
-        self.prompt() 
+        self.prompt()
         self._is_trained = False

-    def prompt(self, prompt = None):
+    def prompt(self, prompt=None):
         if prompt is None:
             self._prompt = ChatPrompter()
         elif isinstance(prompt, PrompterBase):
diff --git a/lazyllm/module/onlineChatModule/sensenovaModule.py b/lazyllm/module/onlineChatModule/sensenovaModule.py
index 38342d88..ba4566f0 100644
--- a/lazyllm/module/onlineChatModule/sensenovaModule.py
+++ b/lazyllm/module/onlineChatModule/sensenovaModule.py
@@ -70,8 +70,8 @@ def _parse_response_non_stream(self, response: str) -> str:
             msg = {"role": resp['choices'][0].pop("role"), "content": content}
             resp['choices'][0]['message'] = msg
             if 'tool_calls' in resp['choices'][0]:
-                 tool_calls = resp['choices'][0].pop("tool_calls")
-                 resp['choices'][0]['message']['tool_calls'] = tool_calls
+                tool_calls = resp['choices'][0].pop("tool_calls")
+                resp['choices'][0]['message']['tool_calls'] = tool_calls
             resp['model'] = self._model_name
             return resp["choices"][0]
         except Exception as e:
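
Usage sketch (illustrative, not part of the patches): after this series, the
system_prompt constructor argument is gone; each provider module supplies its
default system text through _get_system_prompt(), and callers configure
prompts via the chainable prompt() setter. A minimal example, assuming the
import path matches the file layout above and the provider's API key is
already set in lazyllm.config:

    from lazyllm.module.onlineChatModule.glmModule import GLMModule

    # No system_prompt kwarg any more; GLMModule contributes its default
    # system text via its _get_system_prompt() override.
    m = GLMModule(model="glm-4", stream=False)

    # prompt() accepts None, a str, or a PrompterBase instance and returns
    # self, so it can be chained. A str is wrapped in a ChatPrompter, and the
    # provider's system text is merged via _set_model_configs(system=...).
    m = m.prompt("Answer concisely and cite your sources.")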