From 194ac472caa317025f6c6b037b8a830e77cd7f87 Mon Sep 17 00:00:00 2001
From: wangjian052163
Date: Mon, 2 Dec 2024 09:39:39 +0800
Subject: [PATCH] Fix errors in test documentation (#373)

---
 docs/en/Cookbook/streaming.md |  8 ++---
 docs/zh/Cookbook/streaming.md |  8 ++---
 lazyllm/docs/components.py    | 10 +++----
 lazyllm/docs/flow.py          |  6 ++--
 lazyllm/docs/module.py        | 56 ++++++++++-------------------------
 lazyllm/docs/tools.py         | 12 ++++----
 6 files changed, 38 insertions(+), 62 deletions(-)

diff --git a/docs/en/Cookbook/streaming.md b/docs/en/Cookbook/streaming.md
index 0fad7c3d..cb609e8d 100644
--- a/docs/en/Cookbook/streaming.md
+++ b/docs/en/Cookbook/streaming.md
@@ -18,7 +18,7 @@ Let's first simply implement a streaming conversational robot with a front-end i
 import lazyllm
 
 llm = lazyllm.TrainableModule("internlm2-chat-20b", stream=True) # or llm = lazyllm.OnlineChatModule(stream=True)
-lazyllm.WebModule(llm, port=23333).start().wait()
+lazyllm.WebModule(llm, port=23333, stream=True).start().wait()
 ```
 
 Isn't the implementation very simple? You just need to define the model using streaming, and leave the rest of the work to [WebModule][lazyllm.tools.webpages.WebModule] to handle it. Then the messages displayed to the user will be displayed in a streaming manner on the front-end interface.
@@ -129,7 +129,7 @@ Now there is only one last step left. We use [WebModule][lazyllm.tools.webpages.
 
 ```python
 import lazyllm
-lazyllm.WebModule(agent, port=23333).start().wait()
+lazyllm.WebModule(agent, port=23333, stream=True).start().wait()
 ```
 
 Now we have completed a conversational robot that supports streaming output and [FunctionCall][lazyllm.tools.agent.FunctionCall]. When there is information to show to the user, the interface will stream the message content. And [FunctionCall][lazyllm.tools.agent.FunctionCall] will execute normally.
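Beyond the browser UI, the same streamed chunks that `WebModule` renders can be read directly from LazyLLM's `FileSystemQueue`; that is the mechanism the rewritten `OnlineChatModule` example later in this patch polls. A minimal sketch of that pattern follows; the local `internlm2-chat-20b` model and the single-worker executor mirror the examples in this cookbook and are illustrative assumptions rather than the only supported setup.

```python
import lazyllm

# Streaming model: the cookbook uses TrainableModule("internlm2-chat-20b", stream=True)
# or OnlineChatModule(stream=True). The assumption in this sketch is that a streaming
# module's chunks show up on lazyllm.FileSystemQueue, as the OnlineChatModule example
# later in this patch demonstrates.
llm = lazyllm.TrainableModule("internlm2-chat-20b", stream=True).start()

query = "Hello!"
with lazyllm.ThreadPoolExecutor(1) as executor:
    # Run the call in a worker thread so the main thread is free to poll the queue.
    # (The OnlineChatModule example below additionally passes llm_chat_history=[]
    # via functools.partial; it is omitted here for brevity.)
    future = executor.submit(llm, query)
    while True:
        if value := lazyllm.FileSystemQueue().dequeue():
            # Each dequeue drains the chunks pushed since the last poll.
            print(f"output: {''.join(value)}")
        elif future.done():
            break
    # The complete reply is still available as the call's return value.
    print(f"ret: {future.result()}")
```

This is presumably also why the `WebModule` calls in this patch gain their own `stream=True`: the component that displays the output has to opt in to consuming the chunks as they arrive.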
@@ -186,7 +186,7 @@ def get_n_day_weather_forecast(location: str, num_days: int, unit: Literal["cels
 llm = lazyllm.TrainableModule("internlm2-chat-20b", stream=True).start() # or llm = lazyllm.OnlineChatModule()
 tools = ["get_current_weather", "get_n_day_weather_forecast"]
 agent = FunctionCallAgent(llm, tools)
-lazyllm.WebModule(agent, port=23333).start().wait()
+lazyllm.WebModule(agent, port=23333, stream=True).start().wait()
 ```
 
 The effect is as follows:
@@ -255,7 +255,7 @@ def get_n_day_weather_forecast(location: str, num_days: int, unit: Literal["cels
 llm = lazyllm.TrainableModule("internlm2-chat-20b", stream=True, return_trace=True).start() # or llm = lazyllm.OnlineChatModule(return_trace=True)
 tools = ["get_current_weather", "get_n_day_weather_forecast"]
 agent = FunctionCallAgent(llm, tools, return_trace=True)
-lazyllm.WebModule(agent, port=23333).start().wait()
+lazyllm.WebModule(agent, port=23333, stream=True).start().wait()
 ```
 
 The effect is as follows:

diff --git a/docs/zh/Cookbook/streaming.md b/docs/zh/Cookbook/streaming.md
index d46f79d8..3714e6be 100644
--- a/docs/zh/Cookbook/streaming.md
+++ b/docs/zh/Cookbook/streaming.md
@@ -18,7 +18,7 @@
 import lazyllm
 
 llm = lazyllm.TrainableModule("internlm2-chat-20b", stream=True) # or llm = lazyllm.OnlineChatModule(stream=True)
-lazyllm.WebModule(llm, port=23333).start().wait()
+lazyllm.WebModule(llm, port=23333, stream=True).start().wait()
 ```
 
 实现是不是很简单,只需要定义好模型使用流式,其余工作交给 [WebModule][lazyllm.tools.webpages.WebModule] 来处理即可,则在前端界面上会流式的显示展示给用户的消息。
@@ -129,7 +129,7 @@ agent = FunctionCallAgent(llm, tools)
 
 ```python
 import lazyllm
-lazyllm.WebModule(agent, port=23333).start().wait()
+lazyllm.WebModule(agent, port=23333, stream=True).start().wait()
 ```
 
 现在便完成了支持流式输出和 [FunctionCall][lazyllm.tools.agent.FunctionCall] 的对话机器人。当有给用户展示的信息时,界面便会流式的输出消息内容。而 [FunctionCall][lazyllm.tools.agent.FunctionCall] 会正常执行。
@@ -186,7 +186,7 @@ def get_n_day_weather_forecast(location: str, num_days: int, unit: Literal["cels
 llm = lazyllm.TrainableModule("internlm2-chat-20b", stream=True).start() # or llm = lazyllm.OnlineChatModule()
 tools = ["get_current_weather", "get_n_day_weather_forecast"]
 agent = FunctionCallAgent(llm, tools)
-lazyllm.WebModule(agent, port=23333).start().wait()
+lazyllm.WebModule(agent, port=23333, stream=True).start().wait()
 ```
 
 效果如下:
@@ -256,7 +256,7 @@ def get_n_day_weather_forecast(location: str, num_days: int, unit: Literal["cels
 llm = lazyllm.TrainableModule("internlm2-chat-20b", stream=True, return_trace=True).start() # or llm = lazyllm.OnlineChatModule(return_trace=True)
 tools = ["get_current_weather", "get_n_day_weather_forecast"]
 agent = FunctionCallAgent(llm, tools, return_trace=True)
-lazyllm.WebModule(agent, port=23333).start().wait()
+lazyllm.WebModule(agent, port=23333, stream=True).start().wait()
 ```
 
 效果如下:

diff --git a/lazyllm/docs/components.py b/lazyllm/docs/components.py
index 39e03b37..ac6f2029 100644
--- a/lazyllm/docs/components.py
+++ b/lazyllm/docs/components.py
@@ -963,23 +963,23 @@
 >>> from lazyllm import ChatPrompter
 >>> p = ChatPrompter('hello world')
 >>> p.generate_prompt('this is my input')
-'<|start_system|>You are an AI-Agent developed by LazyLLM.hello world\\\\n\\\\n<|end_system|>\\\\n\\\\n\\\\n<|Human|>:\\\\nthis is my input\\\\n<|Assistant|>:\\\\n'
+'You are an AI-Agent developed by LazyLLM.hello world\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nthis is my input\\\\n\\\\n'
 >>> p.generate_prompt('this is my input', return_dict=True)
 {'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\\\nhello world\\\\n\\\\n'}, {'role': 'user', 'content': 'this is my input'}]}
 >>>
 >>> p = ChatPrompter('hello world {instruction}', extro_keys=['knowledge'])
 >>> p.generate_prompt(dict(instruction='this is my ins', input='this is my inp', knowledge='LazyLLM-Knowledge'))
-'<|start_system|>You are an AI-Agent developed by LazyLLM.hello world this is my ins\\\\nHere are some extra messages you can referred to:\\\\n\\\\n### knowledge:\\\\nLazyLLM-Knowledge\\\\n\\\\n\\\\n<|end_system|>\\\\n\\\\n\\\\n<|Human|>:\\\\nthis is my inp\\\\n<|Assistant|>:\\\\n'
+'You are an AI-Agent developed by LazyLLM.hello world this is my ins\\\\nHere are some extra messages you can referred to:\\\\n\\\\n### knowledge:\\\\nLazyLLM-Knowledge\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nthis is my inp\\\\n\\\\n'
 >>> p.generate_prompt(dict(instruction='this is my ins', input='this is my inp', knowledge='LazyLLM-Knowledge'), return_dict=True)
 {'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\\\nhello world this is my ins\\\\nHere are some extra messages you can referred to:\\\\n\\\\n### knowledge:\\\\nLazyLLM-Knowledge\\\\n\\\\n\\\\n'}, {'role': 'user', 'content': 'this is my inp'}]}
 >>> p.generate_prompt(dict(instruction='this is my ins', input='this is my inp', knowledge='LazyLLM-Knowledge'), history=[['s1', 'e1'], ['s2', 'e2']])
-'<|start_system|>You are an AI-Agent developed by LazyLLM.hello world this is my ins\\\\nHere are some extra messages you can referred to:\\\\n\\\\n### knowledge:\\\\nLazyLLM-Knowledge\\\\n\\\\n\\\\n<|end_system|>\\\\n\\\\n<|Human|>:s1<|Assistant|>:e1<|Human|>:s2<|Assistant|>:e2\\\\n<|Human|>:\\\\nthis is my inp\\\\n<|Assistant|>:\\\\n'
+'You are an AI-Agent developed by LazyLLM.hello world this is my ins\\\\nHere are some extra messages you can referred to:\\\\n\\\\n### knowledge:\\\\nLazyLLM-Knowledge\\\\n\\\\n\\\\n\\\\n\\\\ns1e1s2e2\\\\n\\\\nthis is my inp\\\\n\\\\n'
 >>>
 >>> p = ChatPrompter(dict(system="hello world", user="this is user instruction {input} "))
 >>> p.generate_prompt(dict(input="my input", query="this is user query"))
-'<|start_system|>You are an AI-Agent developed by LazyLLM.hello world\\\\n\\\\n<|end_system|>\\\\n\\\\n\\\\n<|Human|>:\\\\nthis is user instruction my input this is user query\\\\n<|Assistant|>:\\\\n'
+'You are an AI-Agent developed by LazyLLM.hello world\\\\n\\\\n\\\\n\\\\nthis is user instruction my input this is user query\\\\n\\\\n'
 >>> p.generate_prompt(dict(input="my input", query="this is user query"), return_dict=True)
-{'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\\\nhello world\\\\n\\\\n'}, {'role': 'user', 'content': 'this is user instruction my input this is user query'}]}
+{'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\\\nhello world'}, {'role': 'user', 'content': 'this is user instruction my input this is user query'}]}
 ''')
 
 # ============= MultiModal

diff --git a/lazyllm/docs/flow.py b/lazyllm/docs/flow.py
index 2a97cd6b..1c424d3f 100644
--- a/lazyllm/docs/flow.py
+++ b/lazyllm/docs/flow.py
@@ -115,9 +115,9 @@
 ...
 >>> flow = lazyllm.pipeline(test1, lazyllm.pipeline(test2, test3))
 >>> flow.for_each(lambda x: callable(x), lambda x: print(x))
-
-
-
+
+
+
 """)
 
 add_chinese_doc('Parallel', """\

diff --git a/lazyllm/docs/module.py b/lazyllm/docs/module.py
index 3bd3a11e..92625350 100644
--- a/lazyllm/docs/module.py
+++ b/lazyllm/docs/module.py
@@ -42,10 +42,10 @@
 ...
 >>> m = Module2()
 >>> m.submodules
-[<__main__.Module object at 0x7f3dc3bb16f0>]
+[]
 >>> m.m3 = Module()
 >>> m.submodules
-[<__main__.Module object at 0x7f3dc3bb16f0>, <__main__.Module object at 0x7f3dc3bb0be0>]
+[, ]
 ''')
 
 add_chinese_doc('ModuleBase.forward', '''\
@@ -116,7 +116,7 @@
 
 add_example('ModuleBase.update', '''\
 >>> import lazyllm
->>> m = lazyllm.module.TrainableModule().finetune_method(lazyllm.finetune.dummy).deploy_method(lazyllm.deploy.dummy).mode('finetune').prompt(None)
+>>> m = lazyllm.module.TrainableModule().finetune_method(lazyllm.finetune.dummy).trainset("").deploy_method(lazyllm.deploy.dummy).mode('finetune').prompt(None)
 >>> m.evalset([1, 2, 3])
 >>> m.update()
 INFO: (lazyllm.launcher) PID: dummy finetune!, and init-args is {}
@@ -134,7 +134,7 @@
 
 add_example('ModuleBase.evalset', '''\
 >>> import lazyllm
->>> m = lazyllm.module.TrainableModule().deploy_method(layzllm.deploy.dummy).finetune_method(lazyllm.finetune.dummy).mode("finetune").prompt(None)
+>>> m = lazyllm.module.TrainableModule().deploy_method(lazyllm.deploy.dummy).finetune_method(lazyllm.finetune.dummy).trainset("").mode("finetune").prompt(None)
 >>> m.evalset([1, 2, 3])
 >>> m.update()
 INFO: (lazyllm.launcher) PID: dummy finetune!, and init-args is {}
@@ -743,45 +743,21 @@
 
 add_example('OnlineChatModule', '''\
 >>> import lazyllm
+>>> from functools import partial
 >>> m = lazyllm.OnlineChatModule(source="sensenova", stream=True)
 >>> query = "Hello!"
->>> resp = m(query)
->>> for r in resp:
-...     print(r)
+>>> with lazyllm.ThreadPoolExecutor(1) as executor:
+...     future = executor.submit(partial(m, llm_chat_history=[]), query)
+...     while True:
+...         if value := lazyllm.FileSystemQueue().dequeue():
+...             print(f"output: {''.join(value)}")
+...         elif future.done():
+...             break
+...     print(f"ret: {future.result()}")
 ...
-{'content': '你好'}
-{'content': '!'}
-{'content': '有什么'}
-{'content': '我可以'}
-{'content': '帮助'}
-{'content': '你的'}
-{'content': '吗'}
-{'content': '?'}
-{'content': ''}
->>> m = lazyllm.OnlineChatModule(source="sensenova", model="nova-ptc-s-v2", stream=False)
->>> train_file = "toy_chat_fine_tuning.jsonl"
->>> m.set_train_tasks(train_file=train_file, upload_url="https://file.sensenova.cn/v1/files")
->>> m._get_train_tasks()
-Num examples:
-First example:
-{'role': 'system', 'content': 'Marv is a factual chatbot that is also sarcastic.'}
-{'role': 'user', 'content': "What's the capital of France?"}
-{'role': 'assistant', 'content': "Paris, as if everyone doesn't know that already."}
-No errors found
-train file id: 7193d9a3-8b6e-4332-99cc-724dec75d9dd
-toy_chat_fine_tuning.jsonl upload success! file id is d632e591-f668-43a1-b5bf-49418e9c0fec
-fine tuning job ft-85f7bc96034441f2b64f9a5fff5d5b9c created, status: SUBMITTED
-fine tuning job ft-85f7bc96034441f2b64f9a5fff5d5b9c status: RUNNING
-...
-fine tuning job ft-85f7bc96034441f2b64f9a5fff5d5b9c status: SUCCEEDED
-fine tuned model: nova-ptc-s-v2:ft-fee492082cbe4a6d880d396f34f1bc50 finished
->>> m._get_deploy_tasks()
-deployment c5aaf3bf-ef9b-4797-8c15-12ff04ed5372 created, status: SUBMITTED
-...
-deployment c5aaf3bf-ef9b-4797-8c15-12ff04ed5372 status: PENDING
-...
-deployment c5aaf3bf-ef9b-4797-8c15-12ff04ed5372 status: RUNNING
-deployment c5aaf3bf-ef9b-4797-8c15-12ff04ed5372 finished
+output: Hello
+output: ! How can I assist you today?
+ret: Hello! How can I assist you today?
 ''')
 
 add_chinese_doc('OnlineEmbeddingModule', '''
diff --git a/lazyllm/docs/tools.py b/lazyllm/docs/tools.py
index 8a0df141..f157ef4a 100644
--- a/lazyllm/docs/tools.py
+++ b/lazyllm/docs/tools.py
@@ -201,16 +201,16 @@
 >>> doc1 = Document(dataset_path="your_files_path", create_ui=False)
 >>> doc2 = Document(dataset_path="your_files_path", create_ui=False)
 >>> doc1.add_reader("**/*.yml", YmlReader)
->>> print(doc1._local_file_reader)
-# {'**/*.yml': }
->>> print(doc2._local_file_reader)
-# {}
+>>> print(doc1._impl._local_file_reader)
+{'**/*.yml': }
+>>> print(doc2._impl._local_file_reader)
+{}
 >>> files = ["your_yml_files"]
 >>> Document.register_global_reader("**/*.yml", processYml)
 >>> doc1._impl._reader.load_data(input_files=files)
-# Call the class YmlReader.
+Call the class YmlReader.
 >>> doc2._impl._reader.load_data(input_files=files)
-# Call the function processYml.
+Call the function processYml.
 ''')
 
 add_english_doc('rag.readers.ReaderBase', '''
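A note on the tools.py hunk above: it assumes `YmlReader` and `processYml` are already defined earlier in the docstring being patched; their definitions are not part of this diff. For anyone trying the corrected example on its own, a rough sketch of what such a pair could look like is below. The `ReaderBase` subclass, the `_load_data` signature, and the import paths are assumptions inferred from LazyLLM's reader interface (the same `rag.readers.ReaderBase` referenced at the end of the patch), not text taken from this PR.

```python
from lazyllm.tools.rag.readers import ReaderBase  # assumed import path
from lazyllm.tools.rag import DocNode             # assumed import path

class YmlReader(ReaderBase):
    # Instance-level reader, registered with doc1.add_reader("**/*.yml", YmlReader).
    def _load_data(self, file, extra_info=None, fs=None):  # signature is an assumption
        with open(file, "r", encoding="utf-8") as f:
            data = f.read()  # raw text is kept; a real reader might parse the YAML here
        print("Call the class YmlReader.")
        return [DocNode(text=data)]  # bare node; real readers may also attach metadata

def processYml(file, extra_info=None):
    # Global reader, registered with Document.register_global_reader("**/*.yml", processYml).
    with open(file, "r", encoding="utf-8") as f:
        data = f.read()
    print("Call the function processYml.")
    return [DocNode(text=data)]
```

The corrected expected output in the hunk is the point of the example: `doc1`, which registered `YmlReader` locally, prints `Call the class YmlReader.`, while `doc2` falls back to the globally registered `processYml`, so instance-level readers take precedence over global ones.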