Commit
Update version to 0.2.4 and cherry-pick some features (#242)
Co-authored-by: wangjian052163 <[email protected]>
Co-authored-by: SunXiaoye <[email protected]>
Co-authored-by: lwj-st <[email protected]>
4 people authored Sep 11, 2024
1 parent e633ae5 commit 22bf9b4
Showing 11 changed files with 79 additions and 57 deletions.
11 changes: 7 additions & 4 deletions .github/workflows/main.yml
@@ -21,6 +21,9 @@ concurrency:

jobs:
Clone:
+ if: |
+ !contains(github.event.head_commit.message, '[skip ci]')
+ && !contains(github.event.pull_request.title, '[skip ci]')
runs-on: tps_sco_nv
steps:
- name: Checkout code
@@ -69,7 +72,7 @@ jobs:
export LAZYLLM_SCO_WORKSPACE=expert-services
export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/
export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models
- python -m pytest -vx tests/basic_tests/
+ python -m pytest --lf --last-failed-no-failures=all -v tests/basic_tests/
AdvancedStandardTests:
runs-on: tps_sco_nv
@@ -89,7 +92,7 @@ jobs:
export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/
export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models
source ~/ENV/env.sh
- python -m pytest -vx tests/advanced_tests/standard_test/
+ python -m pytest --lf --last-failed-no-failures=all -v tests/advanced_tests/standard_test/
AdvancedFullTests:
runs-on: tps_sco_nv
@@ -107,7 +110,7 @@ jobs:
export LAZYLLM_SCO_WORKSPACE=expert-services
export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/
export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models
- python -m pytest -vx tests/advanced_tests/full_test/
+ python -m pytest --lf --last-failed-no-failures=all -v tests/advanced_tests/full_test/
ChargeTests:
runs-on: tps_sco_nv
@@ -121,4 +124,4 @@ jobs:
export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/
export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models
source ~/ENV/env.sh
- python -m pytest -vx tests/charge_tests
+ python -m pytest --lf --last-failed-no-failures=all -v tests/charge_tests
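Across all four jobs, `-x` (abort on first failure) gives way to pytest's last-failed workflow: `--lf` reruns only the tests recorded as failed in the cache, and `--last-failed-no-failures=all` falls back to the full suite when nothing is cached, so a first or clean run is never empty. A minimal sketch of the equivalent programmatic invocation, assuming pytest is installed and the working directory is the repository root:

```python
import pytest

# Equivalent of the CI command above. --lf replays only the tests cached
# as failed on the previous run; --last-failed-no-failures=all runs the
# whole suite when no failures are cached.
exit_code = pytest.main(["--lf", "--last-failed-no-failures=all", "-v",
                         "tests/basic_tests/"])
raise SystemExit(exit_code)
```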
22 changes: 12 additions & 10 deletions lazyllm/docs/module.py
@@ -543,26 +543,28 @@
''')

add_chinese_doc('ServerModule', '''\
- 借助fastapi,将任意可调用对象包装成api服务,可同时启动一个主服务和多个卫星服务.
+ 借助 fastapi,将任意可调用对象包装成 api 服务,可同时启动一个主服务和多个卫星服务。
Args:
- m (Callable): 被包装成服务的函数,可以是一个函数,也可以是一个仿函数。当启动卫星服务时,需要是一个实现了__call__的对象(仿函数)。
- pre (Callable): 前处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为None.
- post (Callable): 后处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为None.
- stream (bool): 是否流式请求和输出,默认为非流式
- return_trace (bool): 是否将结果记录在trace中,默认为False
- launcher (LazyLLMLaunchersBase): 用于选择服务执行的计算节点,默认为launchers.remote
+ m (Callable): 被包装成服务的函数,可以是一个函数,也可以是一个仿函数。当启动卫星服务时,需要是一个实现了 ``__call__`` 的对象(仿函数)。
+ pre (Callable): 前处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为 ``None``。
+ post (Callable): 后处理函数,在服务进程执行,可以是一个函数,也可以是一个仿函数,默认为 ``None``。
+ stream (bool): 是否流式请求和输出,默认为非流式。
+ return_trace (bool): 是否将结果记录在 trace 中,默认为 ``False``。
+ port (int): 指定服务部署后的端口,默认为 ``None`` 会随机生成端口。
+ launcher (LazyLLMLaunchersBase): 用于选择服务执行的计算节点,默认为 ``launchers.remote``。
''')

add_english_doc('ServerModule', '''\
Using FastAPI, any callable object can be wrapped into an API service, allowing the simultaneous launch of one main service and multiple satellite services.
Args:
m (Callable): The function to be wrapped as a service. It can be a function or a functor. When launching satellite services, it needs to be an object implementing ``__call__`` (a functor).
- pre (Callable): Preprocessing function executed in the service process. It can be a function or a functor, default is None.
- post (Callable): Postprocessing function executed in the service process. It can be a function or a functor, default is None.
+ pre (Callable): Preprocessing function executed in the service process. It can be a function or a functor, default is ``None``.
+ post (Callable): Postprocessing function executed in the service process. It can be a function or a functor, default is ``None``.
stream (bool): Whether to request and output in streaming mode, default is non-streaming.
- return_trace (bool): Whether to record the results in trace, default is False.
+ return_trace (bool): Whether to record the results in trace, default is ``False``.
+ port (int): Specifies the port after the service is deployed. The default is ``None``, which will generate a random port.
launcher (LazyLLMLaunchersBase): Used to select the compute node for service execution, default is ``launchers.remote``.
**Examples:**\n
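The commit's own Examples block is truncated in this view; as a stand-in, here is a minimal usage sketch of the new `port` parameter. The handler and the port value 23333 are illustrative, and it assumes `start()` deploys the module as elsewhere in LazyLLM:

```python
import lazyllm

def echo(query):
    # Illustrative handler; any callable (or functor) works here.
    return f"echo: {query}"

server = lazyllm.ServerModule(echo, port=23333)  # 23333 is an arbitrary example port
server.start()
print(server("hello"))  # served by the FastAPI relay on the fixed port
```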
9 changes: 5 additions & 4 deletions lazyllm/module/module.py
Expand Up @@ -412,18 +412,19 @@ def _impl(self):

@light_reduce
class _ServerModuleImpl(ModuleBase):
- def __init__(self, m=None, pre=None, post=None, launcher=None, *, father=None):
+ def __init__(self, m=None, pre=None, post=None, launcher=None, port=None, *, father=None):
super().__init__()
self._m = ActionModule(m) if isinstance(m, FlowBase) else m
self._pre_func, self._post_func = pre, post
self._launcher = launcher.clone() if launcher else launchers.remote(sync=False)
self._set_url_f = father._set_url if father else None
+ self._port = port

@lazyllm.once_wrapper
def _get_deploy_tasks(self):
if self._m is None: return None
return Pipeline(
- lazyllm.deploy.RelayServer(func=self._m, pre_func=self._pre_func,
+ lazyllm.deploy.RelayServer(func=self._m, pre_func=self._pre_func, port=self._port,
post_func=self._post_func, launcher=self._launcher),
self._set_url_f)

@@ -432,7 +433,7 @@ def __del__(self):


class ServerModule(UrlModule):
- def __init__(self, m, pre=None, post=None, stream=False, return_trace=False, launcher=None):
+ def __init__(self, m, pre=None, post=None, stream=False, return_trace=False, port=None, launcher=None):
assert stream is False or return_trace is False, 'Module with stream output has no trace'
assert (post is None) or (stream is False), 'Stream cannot be true when post-action exists'
super().__init__(url=None, stream=stream, return_trace=return_trace)
@@ -441,7 +442,7 @@ def __init__(self, m, pre=None, post=None, stream=False, return_trace=False, lau
lazyllm.deploy.RelayServer.keys_name_handle,
copy.deepcopy(lazyllm.deploy.RelayServer.default_headers),
)
- self._impl = _ServerModuleImpl(m, pre, post, launcher, father=self)
+ self._impl = _ServerModuleImpl(m, pre, post, launcher, port, father=self)

_url_id = property(lambda self: self._impl._module_id)

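The plumbing is a straight pass-through: `ServerModule` hands `port` positionally to `_ServerModuleImpl`, which stores it and forwards it to `RelayServer` at deploy time. A sketch of the resulting construction, using only names visible in this diff (the real code also wires `_set_url_f` through a `Pipeline`):

```python
import lazyllm

# What _get_deploy_tasks builds once a port is supplied (sketch only).
relay = lazyllm.deploy.RelayServer(
    func=lambda x: x,                              # the wrapped callable
    pre_func=None, post_func=None,                 # optional pre/post hooks
    port=24001,                                    # hypothetical fixed port
    launcher=lazyllm.launchers.remote(sync=False), # default launcher per the diff
)
```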
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "lazyllm"
version = "0.2.3"
version = "0.2.4"
description = "A Low-code Development Tool For Building Multi-agent LLMs Applications."
authors = ["wangzhihong <[email protected]>"]
license = "Apache-2.0 license"
2 changes: 2 additions & 0 deletions tests/advanced_tests/full_test/pytest.ini
@@ -0,0 +1,2 @@
+ [pytest]
+ cache_dir = .pytest-cache
2 changes: 2 additions & 0 deletions tests/advanced_tests/standard_test/pytest.ini
@@ -0,0 +1,2 @@
+ [pytest]
+ cache_dir = .pytest-cache
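These per-suite `pytest.ini` files (two more follow for `basic_tests` and `charge_tests`) point `cache_dir` at a suite-local `.pytest-cache`, which is where the `--lf` state used by the workflow lives. A small sketch for inspecting that state, assuming the suite has run at least once:

```python
import json
from pathlib import Path

# pytest keeps the last-failed set as JSON under <cache_dir>/v/cache/lastfailed.
cache = Path("tests/advanced_tests/standard_test/.pytest-cache/v/cache/lastfailed")
if cache.exists():
    print(json.loads(cache.read_text()))  # e.g. {"test_file.py::test_name": True}
else:
    print("no failures cached; --last-failed-no-failures=all runs the full suite")
```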
10 changes: 8 additions & 2 deletions tests/advanced_tests/standard_test/test_intent_classifier.py
@@ -1,10 +1,16 @@
from lazyllm.tools import IntentClassifier
import lazyllm
+ from lazyllm.launcher import cleanup


class TestIntentClassifier(object):
- def setup_method(self):
- self._llm = lazyllm.TrainableModule('internlm2-chat-7b')
+ @classmethod
+ def setup_class(cls):
+ cls._llm = lazyllm.TrainableModule('internlm2-chat-7b')
+
+ @classmethod
+ def teardown_class(cls):
+ cleanup()

def test_intent_classifier(self):
intent_list = ["Chat", "Financial Knowledge Q&A", "Employee Information Query", "Weather Query"]
20 changes: 12 additions & 8 deletions tests/advanced_tests/standard_test/test_llm_parser.py
@@ -1,21 +1,25 @@
import unittest
from unittest.mock import MagicMock
from lazyllm import LLMParser, TrainableModule
+ from lazyllm.launcher import cleanup


class TestLLMParser(unittest.TestCase):
- def setUp(self):
- self.llm = TrainableModule("internlm2-chat-7b")
- self.llm.start()
-
- self.mock_node = MagicMock()
- self.mock_node.get_text.return_value = (
+ @classmethod
+ def setup_class(cls):
+ cls.llm = TrainableModule("internlm2-chat-7b").start()
+ cls.mock_node = MagicMock()
+ cls.mock_node.get_text.return_value = (
"Hello, I am an AI robot developed by SenseTime, named LazyLLM. "
"My mission is to assist you in building the most powerful large-scale model applications with minimal cost."
)

- self.summary_parser = LLMParser(self.llm, language="en", task_type="summary")
- self.keywords_parser = LLMParser(self.llm, language="en", task_type="keywords")
+ cls.summary_parser = LLMParser(cls.llm, language="en", task_type="summary")
+ cls.keywords_parser = LLMParser(cls.llm, language="en", task_type="keywords")
+
+ @classmethod
+ def teardown_class(cls):
+ cleanup()

def test_summary_transform(self):
result = self.summary_parser.transform(self.mock_node)
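Both test refactors above (test_intent_classifier.py and test_llm_parser.py) move expensive model startup from per-test setup into a once-per-class hook and add a teardown that releases launchers via `cleanup()`. The shape of the pattern, as a sketch with a placeholder test:

```python
import lazyllm
from lazyllm.launcher import cleanup

class TestExample:
    @classmethod
    def setup_class(cls):
        # Start the model once per class rather than once per test method.
        cls.llm = lazyllm.TrainableModule('internlm2-chat-7b').start()

    @classmethod
    def teardown_class(cls):
        cleanup()  # release launchers/jobs started by this class

    def test_model_started(self):
        assert self.llm is not None
```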
54 changes: 26 additions & 28 deletions tests/advanced_tests/standard_test/test_trainable_fc.py
@@ -119,92 +119,90 @@ def LLMWorker(input: str):
@pytest.fixture()
def exe_trainable_single_function_call(request):
params = request.param if hasattr(request, 'param') else {}
- model = params.get('model', None)
tools = params.get("tools", [])
query = params.get("query", "")
+ llm = request.cls.llm
if not query or not tools:
raise ValueError(f"query: {query} and {tools} cannot be empty.")
- llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start()

print(f"\nStarting test 【{model}】 function calling")
print(f"\nStarting test 【{llm}】 function calling")
fc = FunctionCall(llm, tools)
ret = fc(query)
yield ret
print(f"\n{model}】 function calling test done.")
print(f"\n{llm}】 function calling test done.")

@pytest.fixture()
def exe_trainable_parallel_function_call(request):
params = request.param if hasattr(request, 'param') else {}
- model = params.get('model', None)
tools = params.get("tools", [])
query = params.get("query", "")
+ llm = request.cls.llm
if not query or not tools:
raise ValueError(f"query: {query} and tools: {tools} cannot be empty.")
- llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start()

agent = FunctionCallAgent(llm, tools)
print(f"\nStarting test 【{model}】parallel function calling")
print(f"\nStarting test 【{llm}】parallel function calling")
ret = agent(query)
yield ret
print(f"\n{model}】parallel function calling test done.")
print(f"\n{llm}】parallel function calling test done.")

@pytest.fixture()
def exe_trainable_advance_agent(request):
params = request.param if hasattr(request, 'param') else {}
- model = params.get('model', None)
tools = params.get('tools', [])
query = params.get('query', '')
Agent = params.get('Agent', None)
+ llm = request.cls.llm
if not query or not tools:
raise ValueError(f"query: {query} and tools: {tools} cannot be empty.")
if Agent is None:
raise ValueError(f"Agent: {Agent} must be a valid value.")
- llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start()

agent = Agent(llm, tools)
print(f"\nStarting test 【{model}{Agent}.")
print(f"\nStarting test 【{llm}{Agent}.")
ret = agent(query)
yield ret
print(f"\n{model}{Agent} test done.")
print(f"\n{llm}{Agent} test done.")

tools = ["get_current_weather", "get_n_day_weather_forecast"]
squery1 = "What's the weather like today in celsius in Tokyo."
squery2 = "What will the weather be like in celsius in Paris tomorrow?"
mquery1 = "What's the weather like today in celsius in Tokyo and Paris."
mquery2 = "What will the weather be like in fahrenheit in san francisco and beijing tomorrow?"
- vModels = ['glm-4-9b-chat', 'qwen2-7b-instruct']
- agentQuery = "What is 20+(2*4)? Calculate step by step."
- rewooquery = "What is the name of the cognac house that makes the main ingredient in The Hennchata?"
+ agentQuery = "计算 20*(45+23)*4, Calculate step by step."
+ rewooquery = "美国历届总统就职时年龄最大的是谁"

class TestTrainableFunctionCall(object):
- @pytest.fixture(autouse=True)
- def run_around_tests(self):
- yield
+ @classmethod
+ def setup_class(cls):
+ models = ["internlm2-chat-20b", "glm-4-9b-chat", "qwen2-7b-instruct", "qwen2-72b-instruct-awq"]
+ model = random.choice(models)
+ cls.llm = lazyllm.TrainableModule(model).deploy_method(deploy.vllm).start()
+
+ @classmethod
+ def teardown_class(cls):
+ cleanup()

@pytest.mark.parametrize("exe_trainable_single_function_call",
[{"model": random.choice(vModels), "tools": tools, "query": squery1},
{"model": random.choice(vModels), "tools": tools, "query": squery2}],
[{"tools": tools, "query": squery1},
{"tools": tools, "query": squery2}],
indirect=True)
def test_trainable_single_function_call(self, exe_trainable_single_function_call):
ret = exe_trainable_single_function_call
assert isinstance(ret, list)

@pytest.mark.parametrize("exe_trainable_parallel_function_call",
[{"model": random.choice(vModels), 'tools': tools, 'query': mquery1},
{"model": random.choice(vModels), 'tools': tools, 'query': mquery2}],
[{'tools': tools, 'query': mquery1},
{'tools': tools, 'query': mquery2}],
indirect=True)
def test_trainable_parallel_function_call(self, exe_trainable_parallel_function_call):
ret = exe_trainable_parallel_function_call
assert isinstance(ret, str)

@pytest.mark.parametrize("exe_trainable_advance_agent",
[{"model": random.choice(['internlm2-chat-20b', 'Qwen1.5-14B-Chat']),
'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": ReactAgent},
{"model": random.choice(['internlm2-chat-20b', 'Qwen1.5-14B-Chat']),
'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": PlanAndSolveAgent},
{"model": random.choice(['GLM-4-9B-Chat', 'Qwen2-72B-Instruct-AWQ']),
'tools': ['WikipediaWorker', 'LLMWorker'], 'query': rewooquery, "Agent": ReWOOAgent}],
[{'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": ReactAgent},
{'tools': ['multiply_tool', 'add_tool'], 'query': agentQuery, "Agent": PlanAndSolveAgent},
{'tools': ['WikipediaWorker', 'LLMWorker'], 'query': rewooquery, "Agent": ReWOOAgent}],
indirect=True)
def test_trainable_advance_agent(self, exe_trainable_advance_agent):
ret = exe_trainable_advance_agent
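With the model now chosen and started once in `setup_class`, the fixtures read it back through `request.cls.llm` instead of booting a fresh model per parametrized case. A condensed, self-contained sketch of that indirect-parametrization plumbing, with a stub standing in for the model:

```python
import pytest

@pytest.fixture()
def run_query(request):
    # Indirect parametrization: request.param carries each test's dict,
    # while the model itself is shared class state from setup_class.
    params = request.param if hasattr(request, 'param') else {}
    llm = request.cls.llm
    yield llm(params.get('query', ''))

class TestShared:
    @classmethod
    def setup_class(cls):
        cls.llm = lambda q: f"stub({q})"  # stand-in for the started model

    @pytest.mark.parametrize('run_query', [{'query': 'hi'}], indirect=True)
    def test_query(self, run_query):
        assert run_query == 'stub(hi)'
```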
2 changes: 2 additions & 0 deletions tests/basic_tests/pytest.ini
@@ -0,0 +1,2 @@
+ [pytest]
+ cache_dir = .pytest-cache
2 changes: 2 additions & 0 deletions tests/charge_tests/pytest.ini
@@ -0,0 +1,2 @@
+ [pytest]
+ cache_dir = .pytest-cache
