diff --git a/.github/workflows/deploy_doc.yml b/.github/workflows/deploy_doc.yml
index 1df02d73..a367eaba 100644
--- a/.github/workflows/deploy_doc.yml
+++ b/.github/workflows/deploy_doc.yml
@@ -8,6 +8,10 @@ on:
     branches:
       - main
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   deploy:
     runs-on: tps_sco_nv
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 83b74098..c3deb411 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -8,6 +8,10 @@ on:
     branches:
       - main
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   lint:
     runs-on: ubuntu-latest
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 3b53168f..4498e221 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -13,6 +13,10 @@ on:
 env:
   CI_PATH: '/home/mnt/platform_ci/GitHub/${{ github.repository }}/${GITHUB_RUN_NUMBER}'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   Clone:
     runs-on: tps_sco_nv
@@ -26,23 +30,50 @@ jobs:
         echo ${{ env.CI_PATH }}
         mkdir -p ${{ env.CI_PATH }}
 
+    - name: Clean custom directory
+      run: |
+        set -ex
+        if [ -d "${{ env.CI_PATH }}" ]; then
+          rm -rf ${{ env.CI_PATH }}/*
+        fi
+
     - name: Move code to custom directory
       run: |
         set -ex
         mv $GITHUB_WORKSPACE/* ${{ env.CI_PATH }}/
 
-  Test:
+  BasicTests:
     runs-on: tps_sco_nv
     needs: [Clone]
     steps:
-    - name: test1
+    - name: RunTests
       run: |
         set -ex
         cd ${{ env.CI_PATH }}
-        echo "Placeholder only"
+        realpath .
+        env | grep '^SCC'
+        export LAZYLLM_SCO_ENV_NAME=lazyllm
+        export LAZYLLM_DEFAULT_LAUNCHER=sco
+        export LAZYLLM_SLURM_PART=a100
+        export PYTHONPATH=$PWD:$PYTHONPATH
+        export LAZYLLM_SCO_WORKSPACE=expert-services
+        export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/
+        export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models
+        python -m pytest tests/basic_tests/
 
-    - name: test2
+  AdvancedTests:
+    runs-on: tps_sco_nv
+    needs: [Clone]
+    steps:
+    - name: RunTests
       run: |
         cd ${{ env.CI_PATH }}
-        which python
-        echo "Placeholder only"
+        env | grep '^SCC'
+        export LAZYLLM_SCO_ENV_NAME=lazyllm
+        export LAZYLLM_DEFAULT_LAUNCHER=sco
+        export LAZYLLM_SLURM_PART=a100
+        export PYTHONPATH=$PWD:$PYTHONPATH
+        export LAZYLLM_SCO_WORKSPACE=expert-services
+        export LAZYLLM_DATA_PATH=/mnt/lustre/share_data/lazyllm/data/
+        export LAZYLLM_MODEL_PATH=/mnt/lustre/share_data/lazyllm/models
+        python -m pytest tests/advanced_tests/
diff --git a/lazyllm/common/common.py b/lazyllm/common/common.py
index a6fb5114..b50d67b3 100644
--- a/lazyllm/common/common.py
+++ b/lazyllm/common/common.py
@@ -231,16 +231,19 @@ class TimeoutException(Exception):
 
 @contextmanager
 def timeout(duration, *, msg=''):
     def raise_timeout_exception():
-        raise TimeoutException(f'{msg}, block timed out after {duration} s')
+        event.set()
 
+    event = threading.Event()
     timer = threading.Timer(duration, raise_timeout_exception)
     timer.start()
     try:
         yield
     finally:
-        if timer.is_alive():
+        if not event.is_set():
             timer.cancel()
+        else:
+            raise TimeoutException(f'{msg}, block timed out after {duration} s')
 
 
 class ReadOnlyWrapper(object):
diff --git a/lazyllm/common/logger.py b/lazyllm/common/logger.py
index 0985dcd2..796413f3 100644
--- a/lazyllm/common/logger.py
+++ b/lazyllm/common/logger.py
@@ -95,6 +95,8 @@ def __getattr__(self, attr):
             return getattr(self._logger, attr)
         return getattr(self, attr)
 
+    def close(self):
+        logger.remove()
 
 LOG = _Log()
 
diff --git a/lazyllm/components/deploy/lightllm.py b/lazyllm/components/deploy/lightllm.py
index 2e7432b9..5508f598 100644
--- a/lazyllm/components/deploy/lightllm.py
+++ b/lazyllm/components/deploy/lightllm.py
@@ -49,6 +49,8 @@ def __init__(self,
         })
         self.trust_remote_code = trust_remote_code
         self.kw.check_and_update(kw)
+        self.random_port = False if 'port' in kw and kw['port'] else True
+        self.random_nccl_port = False if 'nccl_port' in kw and kw['nccl_port'] else True
 
     def cmd(self, finetuned_model=None, base_model=None):
         if not os.path.exists(finetuned_model) or \
@@ -60,9 +62,9 @@ def cmd(self, finetuned_model=None, base_model=None):
             finetuned_model = base_model
 
         def impl():
-            if not self.kw['port']:
+            if self.random_port:
                 self.kw['port'] = random.randint(30000, 40000)
-            if not self.kw['nccl_port']:
+            if self.random_nccl_port:
                 self.kw['nccl_port'] = random.randint(20000, 30000)
             cmd = f'python -m lightllm.server.api_server --model_dir {finetuned_model} '
             cmd += self.kw.parse_kwargs()
diff --git a/lazyllm/components/deploy/vllm.py b/lazyllm/components/deploy/vllm.py
index 5e37be4a..3849b128 100644
--- a/lazyllm/components/deploy/vllm.py
+++ b/lazyllm/components/deploy/vllm.py
@@ -46,6 +46,7 @@ def __init__(self,
         })
         self.trust_remote_code = trust_remote_code
         self.kw.check_and_update(kw)
+        self.random_port = False if 'port' in kw and kw['port'] and kw['port'] != 'auto' else True
 
     def cmd(self, finetuned_model=None, base_model=None):
         if not os.path.exists(finetuned_model) or \
@@ -57,7 +58,7 @@ def cmd(self, finetuned_model=None, base_model=None):
             finetuned_model = base_model
 
         def impl():
-            if not self.kw['port'] or self.kw['port'] == 'auto':
+            if self.random_port:
                 self.kw['port'] = random.randint(30000, 40000)
 
             cmd = f'{sys.executable} -m vllm.entrypoints.api_server --model {finetuned_model} '
diff --git a/lazyllm/components/finetune/alpacalora.py b/lazyllm/components/finetune/alpacalora.py
index 61f4fad7..a781598e 100644
--- a/lazyllm/components/finetune/alpacalora.py
+++ b/lazyllm/components/finetune/alpacalora.py
@@ -1,9 +1,11 @@
-from .base import LazyLLMFinetuneBase
-from lazyllm import launchers, ArgsDict, thirdparty
 import os
 import copy
 import random
 
+import lazyllm
+from lazyllm import launchers, ArgsDict, thirdparty
+from .base import LazyLLMFinetuneBase
+
 
 class AlpacaloraFinetune(LazyLLMFinetuneBase):
     defatult_kw = ArgsDict({
@@ -57,6 +59,10 @@ def __init__(self,
 
     def cmd(self, trainset, valset=None) -> str:
         thirdparty.check_packages(['datasets', 'deepspeed', 'fire', 'numpy', 'peft', 'torch', 'transformers'])
+        if not os.path.exists(trainset):
+            default_path = os.path.join(lazyllm.config['data_path'], trainset)
+            if os.path.exists(default_path):
+                trainset = default_path
         if not self.kw['data_path']:
             self.kw['data_path'] = trainset
 
diff --git a/lazyllm/components/finetune/collie.py b/lazyllm/components/finetune/collie.py
index 6a1c89ff..33f3dd73 100644
--- a/lazyllm/components/finetune/collie.py
+++ b/lazyllm/components/finetune/collie.py
@@ -1,8 +1,10 @@
-from .base import LazyLLMFinetuneBase
-from lazyllm import launchers, ArgsDict, thirdparty
 import os
 import copy
 
+import lazyllm
+from lazyllm import launchers, ArgsDict, thirdparty
+from .base import LazyLLMFinetuneBase
+
 
 class CollieFinetune(LazyLLMFinetuneBase):
     defatult_kw = ArgsDict({
@@ -53,6 +55,10 @@ def __init__(self,
 
     def cmd(self, trainset, valset=None) -> str:
         thirdparty.check_packages(['numpy', 'peft', 'torch', 'transformers'])
+        if not os.path.exists(trainset):
+            default_path = os.path.join(lazyllm.config['data_path'], trainset)
+            if os.path.exists(default_path):
+                trainset = default_path
         if not self.kw['data_path']:
             self.kw['data_path'] = trainset
 
diff --git a/lazyllm/configs.py b/lazyllm/configs.py
index 43ad4e9b..01a0bcd1 100644
--- a/lazyllm/configs.py
+++ b/lazyllm/configs.py
@@ -61,4 +61,5 @@ def __str__(self):
          ).add('rag_store', str, 'none', 'RAG_STORE'
          ).add('redis_url', str, 'none', 'REDIS_URL'
          ).add('gpu_type', str, 'A100', 'GPU_TYPE'
+         ).add('sco_env_name', str, '', 'SCO_ENV_NAME'
          )
diff --git a/lazyllm/launcher.py b/lazyllm/launcher.py
index 38f773de..a33cf435 100644
--- a/lazyllm/launcher.py
+++ b/lazyllm/launcher.py
@@ -400,6 +400,8 @@ def _wrap_cmd(self, cmd):
                   '--master_addr ${MASTER_ADDR} --master_port ${MASTER_PORT} '
         pythonpath = os.getenv('PYTHONPATH', '')
         precmd = f'''export PYTHONPATH={os.getcwd()}:{pythonpath}:$PYTHONPATH && '''
+        if lazyllm.config['sco_env_name']:
+            precmd = f"source activate {lazyllm.config['sco_env_name']} && " + precmd
         env_vars = os.environ
         lazyllm_vars = {k: v for k, v in env_vars.items() if k.startswith("LAZYLLM")}
         if lazyllm_vars:
@@ -510,6 +512,8 @@ def cleanup():
             v.stop()
             LOG.info(f"killed job:{k}")
 
+    LOG.close()
+
 atexit.register(cleanup)
 
 def _exitf(*args, **kw):
diff --git a/lazyllm/thirdparty/__init__.py b/lazyllm/thirdparty/__init__.py
index 31ba9804..763cfd4b 100644
--- a/lazyllm/thirdparty/__init__.py
+++ b/lazyllm/thirdparty/__init__.py
@@ -86,7 +86,8 @@ def __getattribute__(self, __name):
 
 modules = ['redis', 'huggingface_hub', 'jieba', 'llama_index', 'modelscope', 'pandas', 'jwt', 'rank_bm25',
            'redisvl', 'datasets', 'deepspeed', 'fire', 'numpy', 'peft', 'torch', 'transformers', 'collie',
           'faiss', 'flash_attn', 'google', 'lightllm', 'llama_index.embeddings.huggingface', 'vllm'
-           'llama_index.storage.docstore.redis', 'llama_index.storage.index.store.redis', 'wandb']
+           'llama_index.storage.docstore.redis', 'llama_index.storage.index.store.redis', 'wandb',
+           'llama_index.storage.kvstore.redis', 'llama_index.retrievers.bm25', 'sklearn', 'torchvision']
 for m in modules:
     vars()[m] = PackageWrapper(m)
diff --git a/lazyllm/tools/webpages/webmodule.py b/lazyllm/tools/webpages/webmodule.py
index 2f7a1860..da373625 100644
--- a/lazyllm/tools/webpages/webmodule.py
+++ b/lazyllm/tools/webpages/webmodule.py
@@ -268,6 +268,11 @@ def wait(self):
         if hasattr(self, 'p'):
             return self.p.join()
 
+    def stop(self):
+        if self.p and self.p.is_alive():
+            self.p.terminate()
+            self.p.join()
+
     def __repr__(self):
         return lazyllm.make_repr('Module', 'Web', name=self._module_name, subs=[repr(self.m)])
 
diff --git a/tests/advanced_tests/test_deploy.py b/tests/advanced_tests/test_deploy.py
new file mode 100644
index 00000000..02cec6ef
--- /dev/null
+++ b/tests/advanced_tests/test_deploy.py
@@ -0,0 +1,42 @@
+import json
+
+import lazyllm
+from lazyllm import deploy
+from lazyllm.launcher import cleanup
+
+class TestDeploy(object):
+
+    def setup_method(self):
+        self.model_path = 'internlm2-chat-7b'
+        self.inputs = ['介绍一下你自己', '李白和李清照是什么关系', '说个笑话吧']
+
+    def test_deploy_lightllm(self):
+        m = lazyllm.TrainableModule(self.model_path, '').deploy_method(deploy.lightllm)
+        m.evalset(self.inputs)
+        m.update_server()
+        m.eval()
+        assert len(m.eval_result) == len(self.inputs)
+        cleanup()
+
+    def test_deploy_vllm(self):
+        m = lazyllm.TrainableModule(self.model_path, '').deploy_method(deploy.vllm)
+        m.evalset(self.inputs)
+        m.update_server()
+        m.eval()
+        assert len(m.eval_result) == len(self.inputs)
+        cleanup()
+
+    def test_deploy_auto(self):
+        m = lazyllm.TrainableModule(self.model_path, '').deploy_method(deploy.AutoDeploy)
+        m.evalset(self.inputs)
+        m.update_server()
+        m.eval()
+        assert len(m.eval_result) == len(self.inputs)
+        cleanup()
+
+    def test_embedding(self):
+        m = lazyllm.TrainableModule('bge-large-zh-v1.5').deploy_method(deploy.AutoDeploy)
+        m.update_server()
+        res = m('你好')
+        assert len(json.loads(res)) == 1024
+        cleanup()
diff --git a/tests/advanced_tests/test_finetune.py b/tests/advanced_tests/test_finetune.py
new file mode 100644
index 00000000..b701dec1
--- /dev/null
+++ b/tests/advanced_tests/test_finetune.py
@@ -0,0 +1,15 @@
+
+class TestFinetune(object):
+
+    def setup_method(self):
+        self.data = 'alpaca/alpaca_data_zh_128.json'
+        self.model_path = 'internlm2-chat-7b'
+
+    def test_finetune_alpacalora(self):
+        pass
+
+    def test_finetune_collie(self):
+        pass
+
+    def test_finetune_auto(self):
+        pass
diff --git a/tests/basic_tests/conftest.py b/tests/basic_tests/conftest.py
new file mode 100644
index 00000000..694d7d58
--- /dev/null
+++ b/tests/basic_tests/conftest.py
@@ -0,0 +1 @@
+pytest_plugins = "pytester"
diff --git a/tests/basic_tests/test_common.py b/tests/basic_tests/test_common.py
new file mode 100644
index 00000000..64c536e4
--- /dev/null
+++ b/tests/basic_tests/test_common.py
@@ -0,0 +1,83 @@
+import lazyllm
+from lazyllm.common import ArgsDict
+import random
+import time
+import pytest
+
+class TestCommon(object):
+
+    def test_common_argsdict(self):
+
+        my_ob = ArgsDict({'a': '1', 'b': '2'})
+        my_ob.check_and_update(my_ob)
+        expected_output = '--a="1" --b="2"'
+        assert my_ob.parse_kwargs() == expected_output
+
+    def test_common_bind(self):
+
+        def exam(a, b, c):
+            return [a, b, c]
+
+        num_list = [random.randint(1, 10) for _ in range(3)]
+        r1 = lazyllm.bind(exam, num_list[0], lazyllm._0, num_list[2])
+        ret_list = r1(num_list[1])
+        assert ret_list == num_list
+
+    def test_common_cmd(self):
+
+        ret = lazyllm.LazyLLMCMD('python a --a=b --c=d', no_displays=['a'])
+        assert str(ret) == 'python a --c=d'
+
+        ret = lazyllm.LazyLLMCMD('python a --a=b --c=d', no_displays=['c'])
+        assert str(ret) == 'python a --a=b '
+
+        ret = lazyllm.LazyLLMCMD('python a --a=b --c=d', no_displays=['d'])
+        assert str(ret) == 'python a --a=b --c=d'
+
+    def test_common_timeout(self):
+        from lazyllm.common.common import TimeoutException
+
+        with pytest.raises(TimeoutException):
+            with lazyllm.timeout(1, msg='hello'):
+                time.sleep(2)
+
+    def test_common_thread(self):
+
+        def is_equal2(x):
+            if x == 2:
+                return x
+            else:
+                raise Exception
+
+        ts = [lazyllm.Thread(target=is_equal2, args=(inp, )) for inp in [2, 3]]
+        [t.start() for t in ts]
+
+        assert ts[0].get_result() == 2
+        with pytest.raises(Exception):
+            ts[1].get_result()
+
+    def test_common_llmreqreshelper(self):
+
+        h = lazyllm.ReqResHelper()
+        assert h.make_request(1, a=3, b=2)
+        assert h.make_request(1, 2, a=3)
+
+        r1 = lazyllm.LazyLlmResponse(messages=1, trace='t1')
+        r2 = lazyllm.LazyLlmResponse(messages=2, trace='t2')
+        assert h.make_request(r1)
+        assert h.make_request(r2)
+        assert h.trace == 't1t2'
+
+        assert h.make_response('abc')
+        assert h.trace == 't1t2'
+
+        r3 = lazyllm.LazyLlmResponse(messages=3, trace='t3')
+        assert h.make_response(r3)
+        assert h.trace == 't1t2'
+
+    def test_common_makerepr(self):
+
+        r1 = lazyllm.make_repr('a', 1)
+        r2 = lazyllm.make_repr('b', 2)
+        rr = lazyllm.make_repr('c', 3, subs=[r1, r2])
+        assert rr == '<c type=3>\n |- <a type=1>\n └- <b type=2>\n'
diff --git a/tests/basic_tests/test_component.py b/tests/basic_tests/test_component.py
new file mode 100644
index 00000000..6b39e4bc
--- /dev/null
+++ b/tests/basic_tests/test_component.py
@@ -0,0 +1,74 @@
+import os
+
+import lazyllm
+from lazyllm import finetune, deploy, launchers
+
+
+class TestComponent(object):
+    def test_prompter(self):
+        p = lazyllm.Prompter(prompt='hello world2 <{input}>')
+        assert not p.is_empty(), "Prompter should not be empty"
+
+    def test_generate_prompt(self):
+        p = lazyllm.Prompter(prompt='hello world2 <{input}>')
+        result = p.generate_prompt('123')
+        assert result == 'hello world2 <123>', f"Expected 'hello world2 <123>', but got '{result}'"
+
+    def test_generate_prompt_dict_input(self):
+        p = lazyllm.Prompter(prompt='hello world2 <{input}>')
+        result_dict_input = p.generate_prompt({'input': '123'})
+        assert result_dict_input == 'hello world2 <123>', \
+            f"Expected 'hello world2 <123>', but got '{result_dict_input}'"
+
+    def test_from_template(self):
+        p = lazyllm.Prompter.from_template('alpaca')
+        expected_prompt = (
+            "Below is an instruction that describes a task, paired with an input that provides further context. "
+            "Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### "
+            "Input:\n{input}\n\n### Response:\n"
+        )
+        assert p._prompt == expected_prompt, f"Expected prompt to be '{expected_prompt}', but got '{p._prompt}'"
+
+    def test_generate_prompt_with_template(self):
+        p = lazyllm.Prompter.from_template('alpaca')
+        result = p.generate_prompt(dict(instruction='ins', input='inp'))
+        expected_result = (
+            "Below is an instruction that describes a task, paired with an input that provides further context. "
+            "Write a response that appropriately completes the request.\n\n### Instruction:\nins\n\n### "
+            "Input:\ninp\n\n### Response:\n"
+        )
+        assert result == expected_result, f"Expected '{expected_result}', but got '{result}'"
+
+    def test_finetune_alpacalora(self):
+        # test instantiation
+        f = finetune.alpacalora(base_model='internlm2-chat-7b', target_path='')
+        assert f.base_model == 'internlm2-chat-7b'
+
+    def test_finetune_collie(self):
+        # test instantiation
+        f = finetune.collie(base_model='internlm2-chat-7b', target_path='')
+        assert f.base_model == 'internlm2-chat-7b'
+
+    def test_deploy_lightllm(self):
+        # test instantiation
+        m = deploy.lightllm(trust_remote_code=False, launcher=launchers.sco)
+        assert not m.trust_remote_code
+        assert isinstance(m.launcher, launchers.sco)
+
+    def test_deploy_vllm(self):
+        # test instantiation
+        m = deploy.vllm(trust_remote_code=False, launcher=launchers.sco)
+        assert not m.trust_remote_code
+        assert isinstance(m.launcher, launchers.sco)
+
+    def test_auto_finetune(self):
+        # test instantiation
+        m = finetune.auto('internlm2-chat-7b', '', launcher=launchers.sco(ngpus=1))
+        assert isinstance(m.launcher, launchers.sco)
+        assert os.path.exists(m.base_model)
+
+    def test_auto_deploy(self):
+        # test instantiation
+        m = deploy.auto('internlm2-chat-7b', trust_remote_code=False, launcher=launchers.sco(ngpus=1))
+        assert not m.trust_remote_code
+        assert isinstance(m.launcher, launchers.sco)
diff --git a/tests/basic_tests/test_config.py b/tests/basic_tests/test_config.py
new file mode 100644
index 00000000..1c39e406
--- /dev/null
+++ b/tests/basic_tests/test_config.py
@@ -0,0 +1,86 @@
+import lazyllm
+from lazyllm.configs import Mode
+import os
+import pytest
+import contextlib
+import inspect
+
+
+isolate_env = "PARROTS_ISOLATE_STATUS"
+
+
+def isolated(func):
+    def run_subprocess(self, pytester):
+        # python_path = os.path.dirname(inspect.getfile(sys.modules[__name__]))
+        file_path = inspect.getfile(self.__class__)
+        class_name = self.__class__.__name__
+        method_name = func.__name__
+        test_func = file_path + '::' + class_name + '::' + method_name
+        with clear_env():
+            with set_env(isolate_env, "IN_SUBPROCESS"):
+                result = pytester.runpytest_subprocess(test_func)
+        assert result.ret == 0
+
+    if not inspect.isfunction(func):
+        raise TypeError("Decorator 'isolated' can only decorate functions.")
+
+    fn_code = func.__code__
+    if 'self' not in fn_code.co_varnames or fn_code.co_argcount != 1:
+        raise TypeError("Decorated function should be a method and "
+                        "have exactly one argument 'self'.")
+
+    isolate_status = os.getenv(isolate_env)
+    if isolate_status == "IN_SUBPROCESS":
+        return func
+    # set environ variable to 'OFF' to skip all isolated tests.
+    elif isolate_status == "OFF":
+        return pytest.mark.skip(func)
+    else:
+        return pytest.mark.isolate(run_subprocess)
+
+
+class TestConfig(object):
+    def test_config_mode(self):
+        print(os.environ.get('LAZYLLM_DISPLAY'))
+        assert lazyllm.config['mode'] == Mode.Normal
+
+    @isolated
+    def test_config_disp(self):
+        print(os.environ.get('LAZYLLM_DISPLAY'))
+        assert lazyllm.config['mode'] == Mode.Display
+
+@contextlib.contextmanager
+def clear_env():
+    LAZYLLM_DISPLAY = "LAZYLLM_DISPLAY"
+
+    env_list = [
+        LAZYLLM_DISPLAY,
+    ]
+    env_flags = [os.getenv(env) for env in env_list]
+    print(env_flags)
+    for env, flag in zip(env_list, env_flags):
+        if flag is not None:
+            os.environ[env] = ""
+        if os.getenv(env) is not None:
+            del os.environ[env]
+
+    yield
+
+    for env, flag in zip(env_list, env_flags):
+        if flag is not None:
+            os.environ[env] = flag
+
+
+@contextlib.contextmanager
+def set_env(environ, value):
+    assert isinstance(value, str)
+    original_value = os.getenv(environ)
+    os.environ['LAZYLLM_DISPLAY'] = '1'
+
+    os.environ[environ] = value
+    yield
+
+    if original_value is None:
+        os.environ.pop(environ)
+    else:
+        os.environ[environ] = original_value
diff --git a/tests/basic_tests/test_flow.py b/tests/basic_tests/test_flow.py
new file mode 100644
index 00000000..895a4627
--- /dev/null
+++ b/tests/basic_tests/test_flow.py
@@ -0,0 +1,84 @@
+from lazyllm import pipeline, parallel, diverter, warp, switch, ifs, loop, barrier
+
+def add_one(x): return x + 1
+def is_1(x): return True if x == 1 else False
+def is_2(x): return True if x == 2 else False
+def t1(x): return 2 * x
+def t2(x): return 3 * x
+def t3(x): return x
+
+class TestFlow(object):
+
+    def test_pipeline(self):
+
+        fl = pipeline(add_one, add_one)(1)
+        assert fl == 3
+
+    def test_parallel(self):
+        fl = parallel(add_one, add_one)(1)
+        assert fl == (2, 2)
+
+    def test_parallel_sequential(self):
+        fl = parallel.sequential(add_one, add_one)(1)
+        assert fl == (2, 2)
+
+    def test_diverter(self):
+
+        fl = diverter(add_one, add_one)(1, 2)
+        assert fl == (2, 3)
+
+    def test_warp(self):
+
+        fl = warp(add_one)(1, 2, 3)
+        assert fl == (2, 3, 4)
+
+    def test_switch(self):
+
+        assert switch({is_1: t1, is_2: t2})(1) == 2
+        assert switch({is_1: t1, is_2: t2})(2) == 6
+        assert not switch({is_1: t1, is_2: t2})(3)
+        assert switch({is_1: t1, is_2: t2, 'default': t3})(3) == 3
+
+    def test_ifs(self):
+
+        assert ifs(is_1, t3, t1)(1) == 1
+        assert ifs(is_1, t3, t1)(2) == 4
+
+    def test_loop(self):
+
+        assert loop(add_one, count=2)(0) == 2
+        # assert loop(add_one, stop_condition=is_1)(0)
+
+    def test_barrier(self):
+        res = []
+
+        def get_data(idx):
+            res.append(str(idx))
+            return idx + 1
+
+        ppl = pipeline(
+            get_data,
+            parallel(
+                pipeline(
+                    get_data,
+                    barrier,
+                    get_data,
+                    barrier,
+                    get_data,
+                    get_data,
+                ),
+                pipeline(
+                    get_data,
+                    barrier,
+                    get_data,
+                    get_data,
+                    get_data,
+                    get_data,
+                    barrier,
+                    get_data,
+                ),
+            ),
+        )
+
+        ppl(0)
+        print(res, res == ['0', '1', '1', '2', '3', '4', '5', '2', '6', '3', '4'])
diff --git a/tests/basic_tests/test_launcher.py b/tests/basic_tests/test_launcher.py
new file mode 100644
index 00000000..53223aed
--- /dev/null
+++ b/tests/basic_tests/test_launcher.py
@@ -0,0 +1,59 @@
+import os
+
+import lazyllm
+from lazyllm import launchers
+
+
+class TestLauncher(object):
+
+    def test_slurm(self):
+        launcher = launchers.slurm(
+            partition='pat_rd',
+            nnode=1,
+            nproc=1,
+            ngpus=1,
+            sync=False
+        )
+        assert launcher.partition == 'pat_rd'
+
+    def test_empty(self):
+        launcher = launchers.empty()
+        assert not launcher.subprocess
+
+    def test_sco(self):
+        launcher = launchers.sco(
+            partition='pat_rd',
+            nnode=1,
+            nproc=1,
+            ngpus=1,
+            sync=False
+        )
+        assert launcher.partition == 'pat_rd'
+
+    def test_remote(self):
+        # empty launcher
+        origin_launcher = lazyllm.config.impl['launcher']
+        os.environ["LAZYLLM_DEFAULT_LAUNCHER"] = 'empty'
+        lazyllm.config.add('launcher', str, 'empty', 'DEFAULT_LAUNCHER')
+        launcher = launchers.remote(
+            sync=False
+        )
+        assert type(launcher) is launchers.empty
+        assert not launcher.sync
+        os.environ["LAZYLLM_DEFAULT_LAUNCHER"] = 'slurm'
+        lazyllm.config.add('launcher', str, 'empty', 'DEFAULT_LAUNCHER')
+        launcher = launchers.remote(
+            sync=False
+        )
+        assert type(launcher) is launchers.slurm
+        assert not launcher.sync
+        os.environ["LAZYLLM_DEFAULT_LAUNCHER"] = 'sco'
+        lazyllm.config.add('launcher', str, 'empty', 'DEFAULT_LAUNCHER')
+        launcher = launchers.remote(
+            sync=False
+        )
+        assert type(launcher) is launchers.sco
+        assert not launcher.sync
+
+        os.environ["LAZYLLM_DEFAULT_LAUNCHER"] = origin_launcher
+        lazyllm.config.add('launcher', str, 'empty', 'DEFAULT_LAUNCHER')
diff --git a/tests/basic_tests/test_module.py b/tests/basic_tests/test_module.py
new file mode 100644
index 00000000..02e0fcc0
--- /dev/null
+++ b/tests/basic_tests/test_module.py
@@ -0,0 +1,69 @@
+
+import time
+import requests
+
+import lazyllm
+from lazyllm.launcher import cleanup
+
+class TestModule:
+
+    def setup_method(self):
+        self.base_model = 'internlm2-chat-7b'
+        self.target_path = ''
+        self.data_path = 'data_path'
+
+    def test_ActionModule(self):
+        action_module = lazyllm.ActionModule(lambda x: x + 1)
+        assert action_module(1) == 2
+        assert action_module(10) == 11
+
+    def test_UrlModule(self):
+        def func(x):
+            return str(x) + ' after'
+        # Generate accessible URL service:
+        m1 = lazyllm.ServerModule(func)
+        m1.update()
+
+        m2 = lazyllm.UrlModule(url=m1._url)
+        assert m2._url == m1._url
+        m2.evalset([1, 'hi'])
+        m2.update()
+        assert m2.eval_result == ['1 after', 'hi after']
+        cleanup()
+
+    def test_ServerModule(self):
+        server_module = lazyllm.ServerModule(lambda x: x.upper())
+        server_module.start()
+        assert server_module('hello') == 'HELLO'
+        server_module.evalset(['input1', 'input2'])
+        server_module.eval()
+        assert server_module.eval_result == ['INPUT1', 'INPUT2']
+        cleanup()
+
+    def test_TrainableModule(self):
+        trainable_module = lazyllm.TrainableModule(self.base_model, self.target_path)
+        trainable_module.finetune_method(lazyllm.finetune.dummy)
+        trainable_module.deploy_method(lazyllm.deploy.dummy)
+        trainable_module.mode('finetune')
+        trainable_module.trainset(self.data_path)
+        trainable_module.prompt(prompt=None)
+        trainable_module.update()
+        res_template = "reply for {}, and parameters is {{'do_sample': False, 'temperature': 0.1}}"
+        inputs = 'input'
+        assert trainable_module(inputs) == res_template.format(inputs)
+        inputs = ['input1', 'input2']
+        trainable_module.evalset(['input1', 'input2'])
+        trainable_module.eval()
+        assert trainable_module.eval_result == [res_template.format(x) for x in inputs]
+        cleanup()
+
+    def test_WebModule(self):
+        def func(x):
+            return 'reply ' + x
+        m = lazyllm.WebModule(func)
+        m.update()
+        time.sleep(4)
+        assert m.p.is_alive()
+        response = requests.get(m.url)
+        assert response.status_code == 200
+        m.stop()
diff --git a/tests/basic_tests/test_option.py b/tests/basic_tests/test_option.py
new file mode 100644
index 00000000..d416c120
--- /dev/null
+++ b/tests/basic_tests/test_option.py
@@ -0,0 +1,29 @@
+import lazyllm
+
+class TestOption(object):
+
+    def test_option(self):
+        l1 = [1, 2]
+        l2 = [3, 4, 5]
+        o1 = lazyllm.Option(l1)
+        o2 = lazyllm.Option(l2)
+
+        expected_output = [[1, 3], [1, 4], [1, 5], [2, 3], [2, 4], [2, 5]]
+        assert list(lazyllm.OptionIter([o1, o2])) == expected_output
+
+    def test_test(self):
+
+        def get_options(x):
+            if isinstance(x, lazyllm.Option):
+                return [x]
+            else:
+                return []
+        o1 = lazyllm.Option([1, 2])
+        o2 = lazyllm.Option([o1, 3, 4])
+        o3 = lazyllm.Option([5, 6])
+
+        expected_output = ('[[