From 22bf9b4bc68f241097fb1a5a6262b5eb3ee26d66 Mon Sep 17 00:00:00 2001 From: wangzhihong Date: Wed, 11 Sep 2024 15:37:34 +0800 Subject: [PATCH] Update version to 0.2.4 and cherry-pick some features (#242) Co-authored-by: wangjian052163 Co-authored-by: SunXiaoye <31361630+JingofXin@users.noreply.github.com> Co-authored-by: lwj-st <109193776+lwj-st@users.noreply.github.com> --- .github/workflows/main.yml | 11 ++-- lazyllm/docs/module.py | 22 ++++---- lazyllm/module/module.py | 9 ++-- pyproject.toml | 2 +- tests/advanced_tests/full_test/pytest.ini | 2 + tests/advanced_tests/standard_test/pytest.ini | 2 + .../standard_test/test_intent_classifier.py | 10 +++- .../standard_test/test_llm_parser.py | 20 ++++--- .../standard_test/test_trainable_fc.py | 54 +++++++++---------- tests/basic_tests/pytest.ini | 2 + tests/charge_tests/pytest.ini | 2 + 11 files changed, 79 insertions(+), 57 deletions(-) create mode 100644 tests/advanced_tests/full_test/pytest.ini create mode 100644 tests/advanced_tests/standard_test/pytest.ini create mode 100644 tests/basic_tests/pytest.ini create mode 100644 tests/charge_tests/pytest.ini diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b83db65c..79dc217e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -21,6 +21,9 @@ concurrency: jobs: Clone: + if: | + !contains(github.event.head_commit.message, '[skip ci]') + && !contains(github.event.pull_request.title, '[skip ci]') runs-on: tps_sco_nv steps: - name: Checkout code @@ -69,7 +72,7 @@ jobs: export LAZYLLM_SCO_WORKSPACE=expert-services export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/ export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models - python -m pytest -vx tests/basic_tests/ + python -m pytest --lf --last-failed-no-failures=all -v tests/basic_tests/ AdvancedStandardTests: runs-on: tps_sco_nv @@ -89,7 +92,7 @@ jobs: export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/ export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models source ~/ENV/env.sh - python -m pytest -vx tests/advanced_tests/standard_test/ + python -m pytest --lf --last-failed-no-failures=all -v tests/advanced_tests/standard_test/ AdvancedFullTests: runs-on: tps_sco_nv @@ -107,7 +110,7 @@ jobs: export LAZYLLM_SCO_WORKSPACE=expert-services export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/ export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models - python -m pytest -vx tests/advanced_tests/full_test/ + python -m pytest --lf --last-failed-no-failures=all -v tests/advanced_tests/full_test/ ChargeTests: runs-on: tps_sco_nv @@ -121,4 +124,4 @@ jobs: export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/ export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models source ~/ENV/env.sh - python -m pytest -vx tests/charge_tests + python -m pytest --lf --last-failed-no-failures=all -v tests/charge_tests diff --git a/lazyllm/docs/module.py b/lazyllm/docs/module.py index 61f640b3..3bd3a11e 100644 --- a/lazyllm/docs/module.py +++ b/lazyllm/docs/module.py @@ -543,15 +543,16 @@ ''') add_chinese_doc('ServerModule', '''\ -借助fastapi,将任意可调用对象包装成api服务,可同时启动一个主服务和多个卫星服务. +借助 fastapi,将任意可调用对象包装成 api 服务,可同时启动一个主服务和多个卫星服务。 Args: - m (Callable): 被包装成服务的函数,可以是一个函数,也可以是一个仿函数。当启动卫星服务时,需要是一个实现了__call__的对象(仿函数)。 - pre (Callable): 前处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为None. - post (Callable): 后处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为None. 
-    stream (bool): 是否流式请求和输出,默认为非流式
-    return_trace (bool): 是否将结果记录在trace中,默认为False
-    launcher (LazyLLMLaunchersBase): 用于选择服务执行的计算节点,默认为launchers.remote
+    m (Callable): 被包装成服务的函数,可以是一个函数,也可以是一个仿函数。当启动卫星服务时,需要是一个实现了 ``__call__`` 的对象(仿函数)。
+    pre (Callable): 前处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为 ``None``。
+    post (Callable): 后处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为 ``None``。
+    stream (bool): 是否流式请求和输出,默认为非流式。
+    return_trace (bool): 是否将结果记录在 trace 中,默认为 ``False``。
+    port (int): 指定服务部署后的端口,默认为 ``None``,此时会随机生成端口。
+    launcher (LazyLLMLaunchersBase): 用于选择服务执行的计算节点,默认为 ``launchers.remote``。
 ''')
 
 add_english_doc('ServerModule', '''\
@@ -559,10 +560,11 @@
 
 Args:
     m (Callable): The function to be wrapped as a service. It can be a function or a functor. When launching satellite services, it needs to be an object implementing ``__call__`` (a functor).
-    pre (Callable): Preprocessing function executed in the service process. It can be a function or a functor, default is None.
-    post (Callable): Postprocessing function executed in the service process. It can be a function or a functor, default is None.
+    pre (Callable): Preprocessing function executed in the service process. It can be a function or a functor, default is ``None``.
+    post (Callable): Postprocessing function executed in the service process. It can be a function or a functor, default is ``None``.
     stream (bool): Whether to request and output in streaming mode, default is non-streaming.
-    return_trace (bool): Whether to record the results in trace, default is False.
+    return_trace (bool): Whether to record the results in trace, default is ``False``.
+    port (int): Specifies the port on which the service is deployed. The default is ``None``, in which case a random port is used.
     launcher (LazyLLMLaunchersBase): Used to select the compute node for service execution, default is ``launchers.remote``.
**Examples:**\n diff --git a/lazyllm/module/module.py b/lazyllm/module/module.py index 6802a312..89e2773c 100644 --- a/lazyllm/module/module.py +++ b/lazyllm/module/module.py @@ -412,18 +412,19 @@ def _impl(self): @light_reduce class _ServerModuleImpl(ModuleBase): - def __init__(self, m=None, pre=None, post=None, launcher=None, *, father=None): + def __init__(self, m=None, pre=None, post=None, launcher=None, port=None, *, father=None): super().__init__() self._m = ActionModule(m) if isinstance(m, FlowBase) else m self._pre_func, self._post_func = pre, post self._launcher = launcher.clone() if launcher else launchers.remote(sync=False) self._set_url_f = father._set_url if father else None + self._port = port @lazyllm.once_wrapper def _get_deploy_tasks(self): if self._m is None: return None return Pipeline( - lazyllm.deploy.RelayServer(func=self._m, pre_func=self._pre_func, + lazyllm.deploy.RelayServer(func=self._m, pre_func=self._pre_func, port=self._port, post_func=self._post_func, launcher=self._launcher), self._set_url_f) @@ -432,7 +433,7 @@ def __del__(self): class ServerModule(UrlModule): - def __init__(self, m, pre=None, post=None, stream=False, return_trace=False, launcher=None): + def __init__(self, m, pre=None, post=None, stream=False, return_trace=False, port=None, launcher=None): assert stream is False or return_trace is False, 'Module with stream output has no trace' assert (post is None) or (stream is False), 'Stream cannot be true when post-action exists' super().__init__(url=None, stream=stream, return_trace=return_trace) @@ -441,7 +442,7 @@ def __init__(self, m, pre=None, post=None, stream=False, return_trace=False, lau lazyllm.deploy.RelayServer.keys_name_handle, copy.deepcopy(lazyllm.deploy.RelayServer.default_headers), ) - self._impl = _ServerModuleImpl(m, pre, post, launcher, father=self) + self._impl = _ServerModuleImpl(m, pre, post, launcher, port, father=self) _url_id = property(lambda self: self._impl._module_id) diff --git a/pyproject.toml b/pyproject.toml index 51a40054..12ad936e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "lazyllm" -version = "0.2.3" +version = "0.2.4" description = "A Low-code Development Tool For Building Multi-agent LLMs Applications." 
authors = ["wangzhihong "] license = "Apache-2.0 license" diff --git a/tests/advanced_tests/full_test/pytest.ini b/tests/advanced_tests/full_test/pytest.ini new file mode 100644 index 00000000..e528c26c --- /dev/null +++ b/tests/advanced_tests/full_test/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +cache_dir = .pytest-cache \ No newline at end of file diff --git a/tests/advanced_tests/standard_test/pytest.ini b/tests/advanced_tests/standard_test/pytest.ini new file mode 100644 index 00000000..e528c26c --- /dev/null +++ b/tests/advanced_tests/standard_test/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +cache_dir = .pytest-cache \ No newline at end of file diff --git a/tests/advanced_tests/standard_test/test_intent_classifier.py b/tests/advanced_tests/standard_test/test_intent_classifier.py index 80373ab0..adcd4f0b 100644 --- a/tests/advanced_tests/standard_test/test_intent_classifier.py +++ b/tests/advanced_tests/standard_test/test_intent_classifier.py @@ -1,10 +1,16 @@ from lazyllm.tools import IntentClassifier import lazyllm +from lazyllm.launcher import cleanup class TestIntentClassifier(object): - def setup_method(self): - self._llm = lazyllm.TrainableModule('internlm2-chat-7b') + @classmethod + def setup_class(cls): + cls._llm = lazyllm.TrainableModule('internlm2-chat-7b') + + @classmethod + def teardown_class(cls): + cleanup() def test_intent_classifier(self): intent_list = ["Chat", "Financial Knowledge Q&A", "Employee Information Query", "Weather Query"] diff --git a/tests/advanced_tests/standard_test/test_llm_parser.py b/tests/advanced_tests/standard_test/test_llm_parser.py index cadd9e12..c1ee4f6a 100644 --- a/tests/advanced_tests/standard_test/test_llm_parser.py +++ b/tests/advanced_tests/standard_test/test_llm_parser.py @@ -1,21 +1,25 @@ import unittest from unittest.mock import MagicMock from lazyllm import LLMParser, TrainableModule +from lazyllm.launcher import cleanup class TestLLMParser(unittest.TestCase): - def setUp(self): - self.llm = TrainableModule("internlm2-chat-7b") - self.llm.start() - - self.mock_node = MagicMock() - self.mock_node.get_text.return_value = ( + @classmethod + def setup_class(cls): + cls.llm = TrainableModule("internlm2-chat-7b").start() + cls.mock_node = MagicMock() + cls.mock_node.get_text.return_value = ( "Hello, I am an AI robot developed by SenseTime, named LazyLLM. " "My mission is to assist you in building the most powerful large-scale model applications with minimal cost." 
) - self.summary_parser = LLMParser(self.llm, language="en", task_type="summary") - self.keywords_parser = LLMParser(self.llm, language="en", task_type="keywords") + cls.summary_parser = LLMParser(cls.llm, language="en", task_type="summary") + cls.keywords_parser = LLMParser(cls.llm, language="en", task_type="keywords") + + @classmethod + def teardown_class(cls): + cleanup() def test_summary_transform(self): result = self.summary_parser.transform(self.mock_node) diff --git a/tests/advanced_tests/standard_test/test_trainable_fc.py b/tests/advanced_tests/standard_test/test_trainable_fc.py index e95ee9db..97e6f0d7 100644 --- a/tests/advanced_tests/standard_test/test_trainable_fc.py +++ b/tests/advanced_tests/standard_test/test_trainable_fc.py @@ -119,92 +119,90 @@ def LLMWorker(input: str): @pytest.fixture() def exe_trainable_single_function_call(request): params = request.param if hasattr(request, 'param') else {} - model = params.get('model', None) tools = params.get("tools", []) query = params.get("query", "") + llm = request.cls.llm if not query or not tools: raise ValueError(f"query: {query} and {tools} cannot be empty.") - llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start() - print(f"\nStarting test 【{model}】 function calling") + print(f"\nStarting test 【{llm}】 function calling") fc = FunctionCall(llm, tools) ret = fc(query) yield ret - print(f"\n【{model}】 function calling test done.") + print(f"\n【{llm}】 function calling test done.") @pytest.fixture() def exe_trainable_parallel_function_call(request): params = request.param if hasattr(request, 'param') else {} - model = params.get('model', None) tools = params.get("tools", []) query = params.get("query", "") + llm = request.cls.llm if not query or not tools: raise ValueError(f"query: {query} and tools: {tools} cannot be empty.") - llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start() agent = FunctionCallAgent(llm, tools) - print(f"\nStarting test 【{model}】parallel function calling") + print(f"\nStarting test 【{llm}】parallel function calling") ret = agent(query) yield ret - print(f"\n【{model}】parallel function calling test done.") + print(f"\n【{llm}】parallel function calling test done.") @pytest.fixture() def exe_trainable_advance_agent(request): params = request.param if hasattr(request, 'param') else {} - model = params.get('model', None) tools = params.get('tools', []) query = params.get('query', '') Agent = params.get('Agent', None) + llm = request.cls.llm if not query or not tools: raise ValueError(f"query: {query} and tools: {tools} cannot be empty.") if Agent is None: raise ValueError(f"Agent: {Agent} must be a valid value.") - llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start() agent = Agent(llm, tools) - print(f"\nStarting test 【{model}】 {Agent}.") + print(f"\nStarting test 【{llm}】 {Agent}.") ret = agent(query) yield ret - print(f"\n【{model}】 {Agent} test done.") + print(f"\n【{llm}】 {Agent} test done.") tools = ["get_current_weather", "get_n_day_weather_forecast"] squery1 = "What's the weather like today in celsius in Tokyo." squery2 = "What will the weather be like in celsius in Paris tomorrow?" mquery1 = "What's the weather like today in celsius in Tokyo and Paris." mquery2 = "What will the weather be like in fahrenheit in san francisco and beijing tomorrow?" -vModels = ['glm-4-9b-chat', 'qwen2-7b-instruct'] -agentQuery = "What is 20+(2*4)? Calculate step by step." -rewooquery = "What is the name of the cognac house that makes the main ingredient in The Hennchata?" 
+agentQuery = "计算 20*(45+23)*4, Calculate step by step." +rewooquery = "美国历届总统就职时年龄最大的是谁" class TestTrainableFunctionCall(object): - @pytest.fixture(autouse=True) - def run_around_tests(self): - yield + @classmethod + def setup_class(cls): + models = ["internlm2-chat-20b", "glm-4-9b-chat", "qwen2-7b-instruct", "qwen2-72b-instruct-awq"] + model = random.choice(models) + cls.llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start() + + @classmethod + def teardown_class(cls): cleanup() @pytest.mark.parametrize("exe_trainable_single_function_call", - [{"model": random.choice(vModels), "tools": tools, "query": squery1}, - {"model": random.choice(vModels), "tools": tools, "query": squery2}], + [{"tools": tools, "query": squery1}, + {"tools": tools, "query": squery2}], indirect=True) def test_trainable_single_function_call(self, exe_trainable_single_function_call): ret = exe_trainable_single_function_call assert isinstance(ret, list) @pytest.mark.parametrize("exe_trainable_parallel_function_call", - [{"model": random.choice(vModels), 'tools': tools, 'query': mquery1}, - {"model": random.choice(vModels), 'tools': tools, 'query': mquery2}], + [{'tools': tools, 'query': mquery1}, + {'tools': tools, 'query': mquery2}], indirect=True) def test_trainable_parallel_function_call(self, exe_trainable_parallel_function_call): ret = exe_trainable_parallel_function_call assert isinstance(ret, str) @pytest.mark.parametrize("exe_trainable_advance_agent", - [{"model": random.choice(['internlm2-chat-20b', 'Qwen1.5-14B-Chat']), - 'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": ReactAgent}, - {"model": random.choice(['internlm2-chat-20b', 'Qwen1.5-14B-Chat']), - 'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": PlanAndSolveAgent}, - {"model": random.choice(['GLM-4-9B-Chat', 'Qwen2-72B-Instruct-AWQ']), - 'tools': ['WikipediaWorker', 'LLMWorker'], 'query': rewooquery, "Agent": ReWOOAgent}], + [{'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": ReactAgent}, + {'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": PlanAndSolveAgent}, + {'tools': ['WikipediaWorker', 'LLMWorker'], 'query': rewooquery, "Agent": ReWOOAgent}], indirect=True) def test_trainable_advance_agent(self, exe_trainable_advance_agent): ret = exe_trainable_advance_agent diff --git a/tests/basic_tests/pytest.ini b/tests/basic_tests/pytest.ini new file mode 100644 index 00000000..e528c26c --- /dev/null +++ b/tests/basic_tests/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +cache_dir = .pytest-cache \ No newline at end of file diff --git a/tests/charge_tests/pytest.ini b/tests/charge_tests/pytest.ini new file mode 100644 index 00000000..e528c26c --- /dev/null +++ b/tests/charge_tests/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +cache_dir = .pytest-cache \ No newline at end of file
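
A note on the new ``port`` parameter: it flows from ``ServerModule`` through ``_ServerModuleImpl`` into ``lazyllm.deploy.RelayServer``, so a wrapped service can now be pinned to a fixed port instead of the previous behavior of always choosing a random one. A minimal sketch of the resulting usage, assuming a trivial ``echo`` callable and an illustrative port number (neither taken from this patch)::

    import lazyllm

    def echo(query):
        # Any callable can be wrapped; satellite services require an object
        # implementing __call__ (a functor).
        return f"echo: {query}"

    # port=None (the default) keeps the old behavior of picking a random
    # free port; an explicit port pins the deployed service to it.
    server = lazyllm.ServerModule(echo, port=35678)
    server.start()          # deploys the RelayServer via the configured launcher
    print(server("hello"))  # round-trips "hello" through the HTTP service

The pytest changes are complementary: ``--lf`` reruns only the failures recorded in each suite's ``.pytest-cache`` directory (set by the new per-suite ``pytest.ini`` files), and ``--last-failed-no-failures=all`` falls back to running the whole suite when no failures are cached, so a fully green rerun still exercises every test.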