From cff0a4dfbb24668345c6075f791e1f51126600a0 Mon Sep 17 00:00:00 2001 From: lvliang-intel Date: Wed, 11 Sep 2024 22:26:43 +0800 Subject: [PATCH] Support export megaservice yaml to docker compose file (#642) * Support export megaservice yaml tp docker compose file Signed-off-by: lvliang-intel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add ut case Signed-off-by: lvliang-intel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix issue Signed-off-by: lvliang-intel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add code spell ignore Signed-off-by: lvliang-intel * fix ut issue Signed-off-by: lvliang-intel * disable cli case Signed-off-by: lvliang-intel --------- Signed-off-by: lvliang-intel Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/code_spell_ignore.txt | 3 + comps/cores/mega/cli.py | 61 ++++ comps/cores/mega/exporter.py | 264 ++++++++++++++++++ setup.py | 1 + tests/cores/mega/mega.yaml | 117 ++++++++ .../cores/mega/test_export_docker_compose.py | 85 ++++++ 6 files changed, 531 insertions(+) create mode 100644 comps/cores/mega/cli.py create mode 100644 comps/cores/mega/exporter.py create mode 100644 tests/cores/mega/mega.yaml create mode 100644 tests/cores/mega/test_export_docker_compose.py diff --git a/.github/code_spell_ignore.txt b/.github/code_spell_ignore.txt index e69de29bb..ab5525038 100644 --- a/.github/code_spell_ignore.txt +++ b/.github/code_spell_ignore.txt @@ -0,0 +1,3 @@ +assertIn +assertEqual + diff --git a/comps/cores/mega/cli.py b/comps/cores/mega/cli.py new file mode 100644 index 000000000..11afc0839 --- /dev/null +++ b/comps/cores/mega/cli.py @@ -0,0 +1,61 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import argparse + +from .exporter import convert_to_docker_compose + + +def export_kubernetes_manifests(mega_yaml, output_dir, device="cpu"): + print(f"Generating Kubernetes manifests from {mega_yaml} to {output_dir}") + # Add your logic to convert the YAML to Kubernetes manifest here + + +def export_docker_compose(mega_yaml, output_file, device="cpu"): + print(f"Generating Docker Compose file from {mega_yaml} to {output_file}") + convert_to_docker_compose(mega_yaml, output_file, device) + + +def opea_execute(): + parser = argparse.ArgumentParser(description="OPEA CLI tool") + subparsers = parser.add_subparsers(dest="command", help="commands") + + # Subcommand for export + export_parser = subparsers.add_parser("export", help="Export resources") + + # Subparsers for export to docker-compose and kubernetes + export_subparsers = export_parser.add_subparsers(dest="export_command", help="Export commands") + + # Export to Docker Compose + compose_parser = export_subparsers.add_parser("docker-compose", help="Export to Docker Compose") + compose_parser.add_argument("mega_yaml", help="Path to the mega YAML file") + compose_parser.add_argument("output_file", help="Path to the Docker Compose file") + compose_parser.add_argument( + "--device", choices=["cpu", "gaudi", "xpu", "gpu"], default="cpu", help="Device type to use (default: cpu)" + ) + + # Export to Kubernetes + kube_parser = export_subparsers.add_parser("kubernetes", help="Export to Kubernetes") + kube_parser.add_argument("mega_yaml", help="Path to the mega YAML file") + kube_parser.add_argument("output_dir", help="Directory to store generated Kubernetes manifests") + kube_parser.add_argument( + "--device", choices=["cpu", "gaudi", "xpu", "gpu"], default="cpu", help="Device type to use (default: cpu)" + ) + + # Parse arguments + args = parser.parse_args() + + # Execute appropriate command + if args.command == "export": + if args.export_command == "docker-compose": + export_docker_compose(args.mega_yaml, args.output_file, args.device) + elif args.export_command == "kubernetes": + export_kubernetes_manifests(args.mega_yaml, args.output_dir, args.device) + else: + parser.print_help() + else: + parser.print_help() + + +if __name__ == "__main__": + opea_execute() diff --git a/comps/cores/mega/exporter.py b/comps/cores/mega/exporter.py new file mode 100644 index 000000000..bcf00b216 --- /dev/null +++ b/comps/cores/mega/exporter.py @@ -0,0 +1,264 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import copy +import os + +import yaml + + +def convert_to_docker_compose(mega_yaml, output_file, device="cpu"): + with open(mega_yaml, "r") as f: + mega_config = yaml.safe_load(f) + + services = {} + env_vars = mega_config.get("environment_variables", {}) + + # Define environment variable mapping for specific services + env_var_rename = {"data_prep": {"TEI_EMBEDDING_ENDPOINT": "TEI_ENDPOINT"}} + + for service_name, service_config in mega_config["opea_micro_services"].items(): + for container_name, container_info in service_config.items(): + safe_container_name = container_name.replace("/", "-") + + # Initialize environment variables by combining 'common' with specific ones + environment = copy.deepcopy(env_vars.get("common", {})) # Start with 'common' vars + # Service-specific environment (based on anchors like redis, tei_embedding, etc.) + service_envs = container_info.get("environment", {}) # The environment anchors in the YAML + for key, value in service_envs.items(): + environment[key] = value # Update the environment with specific variables + + # Apply the renaming logic using the env_var_rename mapping + renamed_environment = {} + for key, value in environment.items(): + # If the key needs to be renamed, rename it using the mapping + if key in env_var_rename.get(service_name, {}): + renamed_environment[env_var_rename[service_name][key]] = value + else: + renamed_environment[key] = value + + # Replace placeholders with actual values + for key in renamed_environment: + if ( + isinstance(renamed_environment[key], str) + and renamed_environment[key].startswith("${") + and renamed_environment[key].endswith("}") + ): + var_name = renamed_environment[key][2:-1] + renamed_environment[key] = os.getenv(var_name, renamed_environment[key]) + + service_entry = { + "image": f"{container_name}:{container_info['tag']}", + "container_name": f"{safe_container_name}-server", + "ports": [], + "ipc": "host", + "restart": "unless-stopped", + "environment": renamed_environment, + } + + # Add ports and special settings + if service_name == "embedding": + service_entry["ports"].append("6000:6000") + elif service_name == "retrieval": + service_entry["ports"].append("7000:7000") + elif service_name == "reranking": + service_entry["ports"].append("8000:8000") + elif service_name == "llm": + service_entry["ports"].append("9000:9000") + + # Add depends_on if necessary + if container_name == "opea/dataprep-redis": + service_entry["depends_on"] = ["redis-vector-db"] + service_entry["ports"].append("6007:6007") + elif container_name == "opea/embedding-tei": + service_entry["depends_on"] = ["tei-embedding-service"] + + # Add volumes for specific services + if "volume" in container_info: + service_entry["volumes"] = container_info["volume"] + + services[safe_container_name] = service_entry + + # Additional services like redis + services["redis-vector-db"] = { + "image": "redis/redis-stack:7.2.0-v9", + "container_name": "redis-vector-db", + "ports": ["6379:6379", "8001:8001"], + } + + # Process embedding service + embedding_service = mega_config["opea_micro_services"].get("embedding", {}).get("opea/embedding-tei", {}) + if embedding_service: + embedding_dependencies = embedding_service.get("dependency", {}) + for dep_name, dep_info in embedding_dependencies.items(): + if dep_name == "ghcr.io/huggingface/text-embeddings-inference": + if device == "cpu": + model_id = dep_info.get("requirements", {}).get("model_id", "") + services["text-embeddings-inference-service"] = { + "image": f"{dep_name}:{dep_info['tag']}", + "container_name": "text-embeddings-inference-server", + "ports": ["8090:80"], + "ipc": "host", + "environment": { + **env_vars.get("common", {}), + "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""), + }, + "command": f"--model-id {model_id} --auto-truncate", + } + elif dep_name == "opea/tei-gaudi": + if device == "gaudi": + model_id = dep_info.get("requirements", {}).get("model_id", "") + services["text-embeddings-inference-service"] = { + "image": f"{dep_name}:{dep_info['tag']}", + "container_name": "text-embeddings-inference-server", + "ports": ["8090:80"], + "ipc": "host", + "environment": { + **env_vars.get("common", {}), + "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""), + }, + "command": f"--model-id {model_id} --auto-truncate", + } + # Add specific settings for Habana (Gaudi) devices + services["text-embeddings-inference-service"]["runtime"] = "habana" + services["text-embeddings-inference-service"]["cap_add"] = ["SYS_NICE"] + services["text-embeddings-inference-service"]["environment"].update( + { + "HABANA_VISIBLE_DEVICES": "all", + "OMPI_MCA_btl_vader_single_copy_mechanism": "none", + "MAX_WARMUP_SEQUENCE_LENGTH": "512", + "INIT_HCCL_ON_ACQUIRE": "0", + "ENABLE_EXPERIMENTAL_FLAGS": "true", + } + ) + + # Reranking service handling + reranking_service = mega_config["opea_micro_services"].get("reranking", {}).get("opea/reranking-tei", {}) + if reranking_service: + rerank_dependencies = reranking_service.get("dependency", {}) + for dep_name, dep_info in rerank_dependencies.items(): + if dep_name == "ghcr.io/huggingface/text-embeddings-inference": + if device == "cpu": + model_id = dep_info.get("requirements", {}).get("model_id", "") + services["tei-reranking-service"] = { + "image": f"{dep_name}:{dep_info['tag']}", + "container_name": "tei-reranking-server", + "ports": ["8808:80"], + "volumes": ["./data:/data"], + "shm_size": "1g", + "environment": { + **env_vars.get("common", {}), + "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""), + "HF_HUB_DISABLE_PROGRESS_BARS": "1", + "HF_HUB_ENABLE_HF_TRANSFER": "0", + }, + "command": f"--model-id {model_id} --auto-truncate", + } + elif dep_name == "opea/tei-gaudi": + if device == "gaudi": + model_id = dep_info.get("requirements", {}).get("model_id", "") + services["tei-reranking-service"] = { + "image": f"{dep_name}:{dep_info['tag']}", + "container_name": "tei-reranking-gaudi-server", + "ports": ["8808:80"], + "volumes": ["./data:/data"], + "shm_size": "1g", + "environment": { + **env_vars.get("common", {}), + "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""), + "HF_HUB_DISABLE_PROGRESS_BARS": "1", + "HF_HUB_ENABLE_HF_TRANSFER": "0", + }, + "command": f"--model-id {model_id} --auto-truncate", + } + # Add specific settings for Habana (Gaudi) devices + services["tei-reranking-service"]["runtime"] = "habana" + services["tei-reranking-service"]["cap_add"] = ["SYS_NICE"] + services["tei-reranking-service"]["environment"].update( + { + "HABANA_VISIBLE_DEVICES": "all", + "OMPI_MCA_btl_vader_single_copy_mechanism": "none", + "MAX_WARMUP_SEQUENCE_LENGTH": "512", + "INIT_HCCL_ON_ACQUIRE": "0", + "ENABLE_EXPERIMENTAL_FLAGS": "true", + } + ) + + # LLM service + llm_service = mega_config["opea_micro_services"].get("llm", {}).get("opea/llm-tgi", {}) + if llm_service: + llm_dependencies = llm_service.get("dependency", {}) + for dep_name, dep_info in llm_dependencies.items(): + if dep_name == "ghcr.io/huggingface/text-generation-inference": + if device == "cpu": + model_id = dep_info.get("requirements", {}).get("model_id", "") + services["llm-service"] = { + "image": f"{dep_name}:{dep_info['tag']}", + "container_name": "llm-server", + "ports": ["9001:80"], + "environment": { + **env_vars.get("common", {}), + "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""), + }, + "command": f"--model-id {model_id} --max-input-length 1024 --max-total-tokens 2048", + } + elif dep_name == "ghcr.io/huggingface/tgi-gaudi": + if device == "gaudi": + model_id = dep_info.get("requirements", {}).get("model_id", "") + services["llm-service"] = { + "image": f"{dep_name}:{dep_info['tag']}", + "container_name": "llm-server", + "ports": ["9001:80"], + "environment": { + **env_vars.get("common", {}), + "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""), + }, + "command": f"--model-id {model_id} --max-input-length 1024 --max-total-tokens 2048", + } + # Add specific settings for Habana (Gaudi) devices + services["llm-service"]["runtime"] = "habana" + services["llm-service"]["cap_add"] = ["SYS_NICE"] + services["llm-service"]["environment"].update( + { + "HABANA_VISIBLE_DEVICES": "all", + "OMPI_MCA_btl_vader_single_copy_mechanism": "none", + } + ) + + # Extract configuration for all examples from 'opea_mega_service' + examples = ["chatqna", "faqgen", "audioqna", "visualqna", "codegen", "codetrans"] + for example in examples: + service_name = f"opea/{example}" + ui_service_name = f"opea/{example}-ui" + + # Process both the main service and the UI service + for service in [service_name, ui_service_name]: + # Check if the service exists in the mega.yaml + if service in mega_config.get("opea_mega_service", {}): + service_config = mega_config["opea_mega_service"][service] + container_name = service + safe_container_name = container_name.replace("/", "-") + tag = service_config.get("tag", "latest") + environment = {**env_vars.get("common", {}), **service_config.get("environment", {})} + + service_entry = { + "image": f"{container_name}:{tag}", + "container_name": f"{safe_container_name}-server", + "ports": ["5173:5173"] if "-ui" in service else ["8888:8888"], + "ipc": "host", + "restart": "unless-stopped", + "environment": environment, + } + services[safe_container_name] = service_entry + + docker_compose = { + "version": "3.8", + "services": services, + "networks": {"default": {"driver": "bridge"}}, + } + + # Write to docker-compose.yaml + with open(output_file, "w") as f: + yaml.dump(docker_compose, f, default_flow_style=False) + + print("Docker Compose file generated:", output_file) diff --git a/setup.py b/setup.py index 619e6272c..174310373 100644 --- a/setup.py +++ b/setup.py @@ -68,6 +68,7 @@ def get_build_version(): package_data={"": ["*.yaml", "../*.py"]}, include_package_data=True, install_requires=fetch_requirements("requirements.txt"), + entry_points={"console_scripts": ["opea = comps.cores.mega.cli:opea_execute"]}, python_requires=">=3.8.0", classifiers=[ "Intended Audience :: Science/Research", diff --git a/tests/cores/mega/mega.yaml b/tests/cores/mega/mega.yaml new file mode 100644 index 000000000..02ad13d8c --- /dev/null +++ b/tests/cores/mega/mega.yaml @@ -0,0 +1,117 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +environment_variables: + common: &common + no_proxy: "localhost" + http_proxy: "" + https_proxy: "" + HUGGINGFACEHUB_API_TOKEN: "" + + redis: &redis + REDIS_URL: "redis://localhost:6379" + INDEX_NAME: "rag-redis" + + tei_embedding: &tei_embedding + TEI_EMBEDDING_ENDPOINT: "http://localhost:8090" + + tei_reranking: &tei_reranking + TEI_RERANKING_ENDPOINT: "http://localhost:8808" + + llm: &llm + TGI_LLM_ENDPOINT: "http://localhost:8005" + + chatqna: &chatqna + MEGA_SERVICE_HOST_IP: localhost + EMBEDDING_SERVICE_HOST_IP: localhost + RETRIEVER_SERVICE_HOST_IP: localhost + RERANK_SERVICE_HOST_IP: localhost + LLM_SERVICE_HOST_IP: localhost + + ui: &ui + CHAT_BASE_URL: "http://localhost:8888/v1/chatqna" + DATAPREP_SERVICE_ENDPOINT: "http://localhost:6007/v1/dataprep" + DATAPREP_GET_FILE_ENDPOINT: "http://localhost:6007/v1/dataprep/get_file" + DATAPREP_DELETE_FILE_ENDPOINT: "http://localhost:6007/v1/dataprep/delete_file" + +opea_micro_services: + embedding: + opea/embedding-tei: + tag: latest + type: cpu + dependency: + ghcr.io/huggingface/text-embeddings-inference: + tag: cpu-1.5 + type: cpu + requirements: + model_id: "BAAI/bge-base-en-v1.5" + opea/tei-gaudi: + tag: latest + type: hpu + requirements: + model_id: "BAAI/bge-base-en-v1.5" + environment: + <<: [*common, *tei_embedding] + + llm: + opea/llm-tgi: + tag: latest + type: cpu + dependency: + ghcr.io/huggingface/text-generation-inference: + tag: 2.2.0 + type: cpu + requirements: + model_id: "Intel/neural-chat-7b-v3-3" + ghcr.io/huggingface/tgi-gaudi: + tag: 2.0.4 + type: hpu + requirements: + model_id: "Intel/neural-chat-7b-v3-3" + environment: + <<: [*common, *llm] + + data_prep: + opea/dataprep-redis: + tag: latest + type: cpu + environment: + <<: [*common, *redis, *tei_embedding] + + retrieval: + opea/retriever-redis: + tag: latest + type: cpu + environment: + <<: [*common, *redis] + + reranking: + opea/reranking-tei: + tag: latest + type: cpu + dependency: + ghcr.io/huggingface/text-embeddings-inference: + tag: cpu-1.5 + type: cpu + requirements: + model_id: "BAAI/bge-reranker-base" + opea/tei-gaudi: + tag: latest + type: hpu + requirements: + model_id: "BAAI/bge-reranker-base" + environment: + <<: [*common, *tei_reranking] + +opea_mega_service: + opea/chatqna: + tag: latest + type: cpu + environment: + <<: [*common, *chatqna] + + opea/chatqna-ui: + tag: latest + type: cpu + environment: + <<: [*common, *ui] diff --git a/tests/cores/mega/test_export_docker_compose.py b/tests/cores/mega/test_export_docker_compose.py new file mode 100644 index 000000000..d79c25511 --- /dev/null +++ b/tests/cores/mega/test_export_docker_compose.py @@ -0,0 +1,85 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import subprocess +import unittest + +import yaml + +from comps.cores.mega.exporter import convert_to_docker_compose + + +class TestConvertToDockerCompose(unittest.TestCase): + + def setUp(self): + self.test_dir = os.path.dirname(os.path.abspath(__file__)) + # Paths for the `mega.yaml` and output file + self.mega_yaml = os.path.join(self.test_dir, "mega.yaml") + self.output_file = os.path.join(self.test_dir, "docker-compose.yaml") + + def tearDown(self): + if os.path.isfile(self.output_file): + os.unlink(self.output_file) + + def test_convert_to_docker_compose_gaudi(self): + # Call the function directly + convert_to_docker_compose(self.mega_yaml, self.output_file, device="gaudi") + + # Load and verify the content of the generated docker-compose.yaml + with open(self.output_file, "r") as f: + docker_compose = yaml.safe_load(f) + + self.assertEqual(docker_compose["version"], "3.8") + self.assertIn("services", docker_compose) + self.assertIn("redis-vector-db", docker_compose["services"]) + self.assertIn("text-embeddings-inference-service", docker_compose["services"]) + self.assertEqual( + docker_compose["services"]["text-embeddings-inference-service"]["image"], "opea/tei-gaudi:latest" + ) + self.assertEqual(docker_compose["services"]["text-embeddings-inference-service"]["runtime"], "habana") + + def test_convert_to_docker_compose_xeon(self): + # Call the function directly + convert_to_docker_compose(self.mega_yaml, self.output_file, device="cpu") + + # Load and verify the content of the generated docker-compose.yaml + with open(self.output_file, "r") as f: + docker_compose = yaml.safe_load(f) + + self.assertEqual(docker_compose["version"], "3.8") + self.assertIn("services", docker_compose) + self.assertIn("redis-vector-db", docker_compose["services"]) + self.assertIn("text-embeddings-inference-service", docker_compose["services"]) + self.assertEqual( + docker_compose["services"]["text-embeddings-inference-service"]["image"], + "ghcr.io/huggingface/text-embeddings-inference:cpu-1.5", + ) + + # def test_convert_to_docker_compose_cli(self): + # # Define shell command + # command = ["opea", "export", "docker-compose", self.mega_yaml, self.output_file, "--device=cpu"] + + # # Run the CLI command + # result = subprocess.run(command, capture_output=True, text=True) + + # # Check for command success + # self.assertEqual(result.returncode, 0, f"Command failed with error: {result.stderr}") + + # # Verify the output file + # if os.path.isfile(self.output_file): + # print("Docker Compose file generated successfully.") + + # # Check for key properties in the docker-compose file + # with open(self.output_file, "r") as f: + # docker_compose_content = f.read() + + # self.assertEqual(docker_compose_content["version"], "3.8") + # self.assertIn("redis-vector-db", docker_compose_content) + # self.assertIn("text-embeddings-inference-service", docker_compose_content) + # else: + # self.fail("Docker Compose file not generated.") + + +if __name__ == "__main__": + unittest.main()