forked from opea-project/GenAIExamples
Commit
Support export megaservice yaml to docker compose file (opea-project#642)

* Support export megaservice yaml to docker compose file
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* add ut case
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* fix issue
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* add code spell ignore
* fix ut issue
* disable cli case

Signed-off-by: lvliang-intel <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 3ac391a · commit cff0a4d
Showing 6 changed files with 531 additions and 0 deletions.
@@ -0,0 +1,3 @@
assertIn
assertEqual
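This first file (the diff viewer does not show its path) is an ignore-words list for codespell, matching the "add code spell ignore" step in the commit message: assertIn and assertEqual are unittest method names used by the new test case, listed so the spell checker stops flagging them. As a hedged sketch of how such a list is typically wired into pre-commit — the commit's actual .pre-commit-config.yaml is not part of this diff, and the file name and rev pin below are assumptions:

    # Hypothetical pre-commit hook entry pointing codespell at the ignore list
    - repo: https://github.com/codespell-project/codespell
      rev: v2.2.4
      hooks:
        - id: codespell
          args: [--ignore-words=.codespell-ignore]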
@@ -0,0 +1,61 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse

from .exporter import convert_to_docker_compose


def export_kubernetes_manifests(mega_yaml, output_dir, device="cpu"):
    print(f"Generating Kubernetes manifests from {mega_yaml} to {output_dir}")
    # Add your logic to convert the YAML to Kubernetes manifests here


def export_docker_compose(mega_yaml, output_file, device="cpu"):
    print(f"Generating Docker Compose file from {mega_yaml} to {output_file}")
    convert_to_docker_compose(mega_yaml, output_file, device)


def opea_execute():
    parser = argparse.ArgumentParser(description="OPEA CLI tool")
    subparsers = parser.add_subparsers(dest="command", help="commands")

    # Subcommand for export
    export_parser = subparsers.add_parser("export", help="Export resources")

    # Subparsers for export to docker-compose and kubernetes
    export_subparsers = export_parser.add_subparsers(dest="export_command", help="Export commands")

    # Export to Docker Compose
    compose_parser = export_subparsers.add_parser("docker-compose", help="Export to Docker Compose")
    compose_parser.add_argument("mega_yaml", help="Path to the mega YAML file")
    compose_parser.add_argument("output_file", help="Path to the Docker Compose file")
    compose_parser.add_argument(
        "--device", choices=["cpu", "gaudi", "xpu", "gpu"], default="cpu", help="Device type to use (default: cpu)"
    )

    # Export to Kubernetes
    kube_parser = export_subparsers.add_parser("kubernetes", help="Export to Kubernetes")
    kube_parser.add_argument("mega_yaml", help="Path to the mega YAML file")
    kube_parser.add_argument("output_dir", help="Directory to store generated Kubernetes manifests")
    kube_parser.add_argument(
        "--device", choices=["cpu", "gaudi", "xpu", "gpu"], default="cpu", help="Device type to use (default: cpu)"
    )

    # Parse arguments
    args = parser.parse_args()

    # Execute appropriate command
    if args.command == "export":
        if args.export_command == "docker-compose":
            export_docker_compose(args.mega_yaml, args.output_file, args.device)
        elif args.export_command == "kubernetes":
            export_kubernetes_manifests(args.mega_yaml, args.output_dir, args.device)
        else:
            parser.print_help()
    else:
        parser.print_help()


if __name__ == "__main__":
    opea_execute()
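Nothing in this diff shows how opea_execute is registered as a console script, but the __main__ guard means the module can be driven directly. Assuming a hypothetical package layout that places this file at opea/cli.py next to exporter.py (the relative import requires running it as part of a package), the two subcommands would be invoked like:

    python -m opea.cli export docker-compose mega.yaml docker-compose.yaml --device cpu
    python -m opea.cli export kubernetes mega.yaml ./manifests --device gaudi

Only the docker-compose path is actually implemented; the kubernetes subcommand currently just prints a message.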
@@ -0,0 +1,264 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import copy
import os

import yaml


def convert_to_docker_compose(mega_yaml, output_file, device="cpu"):
    with open(mega_yaml, "r") as f:
        mega_config = yaml.safe_load(f)

    services = {}
    env_vars = mega_config.get("environment_variables", {})

    # Define environment variable mapping for specific services
    env_var_rename = {"data_prep": {"TEI_EMBEDDING_ENDPOINT": "TEI_ENDPOINT"}}

    for service_name, service_config in mega_config["opea_micro_services"].items():
        for container_name, container_info in service_config.items():
            safe_container_name = container_name.replace("/", "-")

            # Initialize environment variables by combining 'common' with specific ones
            environment = copy.deepcopy(env_vars.get("common", {}))  # Start with 'common' vars
            # Service-specific environment (based on anchors like redis, tei_embedding, etc.)
            service_envs = container_info.get("environment", {})  # The environment anchors in the YAML
            for key, value in service_envs.items():
                environment[key] = value  # Update the environment with specific variables

            # Apply the renaming logic using the env_var_rename mapping
            renamed_environment = {}
            for key, value in environment.items():
                # If the key needs to be renamed, rename it using the mapping
                if key in env_var_rename.get(service_name, {}):
                    renamed_environment[env_var_rename[service_name][key]] = value
                else:
                    renamed_environment[key] = value

            # Replace placeholders with actual values
            for key in renamed_environment:
                if (
                    isinstance(renamed_environment[key], str)
                    and renamed_environment[key].startswith("${")
                    and renamed_environment[key].endswith("}")
                ):
                    var_name = renamed_environment[key][2:-1]
                    renamed_environment[key] = os.getenv(var_name, renamed_environment[key])

            service_entry = {
                "image": f"{container_name}:{container_info['tag']}",
                "container_name": f"{safe_container_name}-server",
                "ports": [],
                "ipc": "host",
                "restart": "unless-stopped",
                "environment": renamed_environment,
            }

            # Add ports and special settings
            if service_name == "embedding":
                service_entry["ports"].append("6000:6000")
            elif service_name == "retrieval":
                service_entry["ports"].append("7000:7000")
            elif service_name == "reranking":
                service_entry["ports"].append("8000:8000")
            elif service_name == "llm":
                service_entry["ports"].append("9000:9000")

            # Add depends_on if necessary
            if container_name == "opea/dataprep-redis":
                service_entry["depends_on"] = ["redis-vector-db"]
                service_entry["ports"].append("6007:6007")
            elif container_name == "opea/embedding-tei":
                # depends_on must reference the compose service key this exporter
                # actually emits for the TEI embedding backend below
                service_entry["depends_on"] = ["text-embeddings-inference-service"]

            # Add volumes for specific services
            if "volume" in container_info:
                service_entry["volumes"] = container_info["volume"]

            services[safe_container_name] = service_entry

    # Additional services like redis
    services["redis-vector-db"] = {
        "image": "redis/redis-stack:7.2.0-v9",
        "container_name": "redis-vector-db",
        "ports": ["6379:6379", "8001:8001"],
    }

    # Process embedding service
    embedding_service = mega_config["opea_micro_services"].get("embedding", {}).get("opea/embedding-tei", {})
    if embedding_service:
        embedding_dependencies = embedding_service.get("dependency", {})
        for dep_name, dep_info in embedding_dependencies.items():
            if dep_name == "ghcr.io/huggingface/text-embeddings-inference":
                if device == "cpu":
                    model_id = dep_info.get("requirements", {}).get("model_id", "")
                    services["text-embeddings-inference-service"] = {
                        "image": f"{dep_name}:{dep_info['tag']}",
                        "container_name": "text-embeddings-inference-server",
                        "ports": ["8090:80"],
                        "ipc": "host",
                        "environment": {
                            **env_vars.get("common", {}),
                            "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""),
                        },
                        "command": f"--model-id {model_id} --auto-truncate",
                    }
            elif dep_name == "opea/tei-gaudi":
                if device == "gaudi":
                    model_id = dep_info.get("requirements", {}).get("model_id", "")
                    services["text-embeddings-inference-service"] = {
                        "image": f"{dep_name}:{dep_info['tag']}",
                        "container_name": "text-embeddings-inference-server",
                        "ports": ["8090:80"],
                        "ipc": "host",
                        "environment": {
                            **env_vars.get("common", {}),
                            "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""),
                        },
                        "command": f"--model-id {model_id} --auto-truncate",
                    }
                    # Add specific settings for Habana (Gaudi) devices
                    services["text-embeddings-inference-service"]["runtime"] = "habana"
                    services["text-embeddings-inference-service"]["cap_add"] = ["SYS_NICE"]
                    services["text-embeddings-inference-service"]["environment"].update(
                        {
                            "HABANA_VISIBLE_DEVICES": "all",
                            "OMPI_MCA_btl_vader_single_copy_mechanism": "none",
                            "MAX_WARMUP_SEQUENCE_LENGTH": "512",
                            "INIT_HCCL_ON_ACQUIRE": "0",
                            "ENABLE_EXPERIMENTAL_FLAGS": "true",
                        }
                    )

    # Reranking service handling
    reranking_service = mega_config["opea_micro_services"].get("reranking", {}).get("opea/reranking-tei", {})
    if reranking_service:
        rerank_dependencies = reranking_service.get("dependency", {})
        for dep_name, dep_info in rerank_dependencies.items():
            if dep_name == "ghcr.io/huggingface/text-embeddings-inference":
                if device == "cpu":
                    model_id = dep_info.get("requirements", {}).get("model_id", "")
                    services["tei-reranking-service"] = {
                        "image": f"{dep_name}:{dep_info['tag']}",
                        "container_name": "tei-reranking-server",
                        "ports": ["8808:80"],
                        "volumes": ["./data:/data"],
                        "shm_size": "1g",
                        "environment": {
                            **env_vars.get("common", {}),
                            "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""),
                            "HF_HUB_DISABLE_PROGRESS_BARS": "1",
                            "HF_HUB_ENABLE_HF_TRANSFER": "0",
                        },
                        "command": f"--model-id {model_id} --auto-truncate",
                    }
            elif dep_name == "opea/tei-gaudi":
                if device == "gaudi":
                    model_id = dep_info.get("requirements", {}).get("model_id", "")
                    services["tei-reranking-service"] = {
                        "image": f"{dep_name}:{dep_info['tag']}",
                        "container_name": "tei-reranking-gaudi-server",
                        "ports": ["8808:80"],
                        "volumes": ["./data:/data"],
                        "shm_size": "1g",
                        "environment": {
                            **env_vars.get("common", {}),
                            "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""),
                            "HF_HUB_DISABLE_PROGRESS_BARS": "1",
                            "HF_HUB_ENABLE_HF_TRANSFER": "0",
                        },
                        "command": f"--model-id {model_id} --auto-truncate",
                    }
                    # Add specific settings for Habana (Gaudi) devices
                    services["tei-reranking-service"]["runtime"] = "habana"
                    services["tei-reranking-service"]["cap_add"] = ["SYS_NICE"]
                    services["tei-reranking-service"]["environment"].update(
                        {
                            "HABANA_VISIBLE_DEVICES": "all",
                            "OMPI_MCA_btl_vader_single_copy_mechanism": "none",
                            "MAX_WARMUP_SEQUENCE_LENGTH": "512",
                            "INIT_HCCL_ON_ACQUIRE": "0",
                            "ENABLE_EXPERIMENTAL_FLAGS": "true",
                        }
                    )

    # LLM service
    llm_service = mega_config["opea_micro_services"].get("llm", {}).get("opea/llm-tgi", {})
    if llm_service:
        llm_dependencies = llm_service.get("dependency", {})
        for dep_name, dep_info in llm_dependencies.items():
            if dep_name == "ghcr.io/huggingface/text-generation-inference":
                if device == "cpu":
                    model_id = dep_info.get("requirements", {}).get("model_id", "")
                    services["llm-service"] = {
                        "image": f"{dep_name}:{dep_info['tag']}",
                        "container_name": "llm-server",
                        "ports": ["9001:80"],
                        "environment": {
                            **env_vars.get("common", {}),
                            "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""),
                        },
                        "command": f"--model-id {model_id} --max-input-length 1024 --max-total-tokens 2048",
                    }
            elif dep_name == "ghcr.io/huggingface/tgi-gaudi":
                if device == "gaudi":
                    model_id = dep_info.get("requirements", {}).get("model_id", "")
                    services["llm-service"] = {
                        "image": f"{dep_name}:{dep_info['tag']}",
                        "container_name": "llm-server",
                        "ports": ["9001:80"],
                        "environment": {
                            **env_vars.get("common", {}),
                            "HUGGINGFACEHUB_API_TOKEN": env_vars.get("HUGGINGFACEHUB_API_TOKEN", ""),
                        },
                        "command": f"--model-id {model_id} --max-input-length 1024 --max-total-tokens 2048",
                    }
                    # Add specific settings for Habana (Gaudi) devices
                    services["llm-service"]["runtime"] = "habana"
                    services["llm-service"]["cap_add"] = ["SYS_NICE"]
                    services["llm-service"]["environment"].update(
                        {
                            "HABANA_VISIBLE_DEVICES": "all",
                            "OMPI_MCA_btl_vader_single_copy_mechanism": "none",
                        }
                    )

    # Extract configuration for all examples from 'opea_mega_service'
    examples = ["chatqna", "faqgen", "audioqna", "visualqna", "codegen", "codetrans"]
    for example in examples:
        service_name = f"opea/{example}"
        ui_service_name = f"opea/{example}-ui"

        # Process both the main service and the UI service
        for service in [service_name, ui_service_name]:
            # Check if the service exists in the mega.yaml
            if service in mega_config.get("opea_mega_service", {}):
                service_config = mega_config["opea_mega_service"][service]
                container_name = service
                safe_container_name = container_name.replace("/", "-")
                tag = service_config.get("tag", "latest")
                environment = {**env_vars.get("common", {}), **service_config.get("environment", {})}

                service_entry = {
                    "image": f"{container_name}:{tag}",
                    "container_name": f"{safe_container_name}-server",
                    "ports": ["5173:5173"] if "-ui" in service else ["8888:8888"],
                    "ipc": "host",
                    "restart": "unless-stopped",
                    "environment": environment,
                }
                services[safe_container_name] = service_entry

    docker_compose = {
        "version": "3.8",
        "services": services,
        "networks": {"default": {"driver": "bridge"}},
    }

    # Write to docker-compose.yaml
    with open(output_file, "w") as f:
        yaml.dump(docker_compose, f, default_flow_style=False)

    print("Docker Compose file generated:", output_file)
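The exporter reads a small set of fixed keys rather than a formal schema: environment_variables.common seeds every service's environment (with ${VAR} strings resolved from the host environment), each image under opea_micro_services carries a tag, an environment block, an optional volume list, and an optional dependency block whose requirements.model_id picks the served model, and opea_mega_service lists the gateway and UI images. A minimal input shaped the way this code expects — the tags, endpoint, and model ID here are illustrative, not taken from the commit — might look like:

    environment_variables:
      common:
        http_proxy: ${http_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    opea_micro_services:
      embedding:
        opea/embedding-tei:
          tag: latest
          environment:
            TEI_EMBEDDING_ENDPOINT: http://text-embeddings-inference-server:80
          dependency:
            ghcr.io/huggingface/text-embeddings-inference:
              tag: cpu-1.5
              requirements:
                model_id: BAAI/bge-base-en-v1.5
    opea_mega_service:
      opea/chatqna:
        tag: latest

With device="cpu", this would yield compose services for opea-embedding-tei (port 6000), text-embeddings-inference-service (port 8090), opea-chatqna (port 8888), and the always-added redis-vector-db.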
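The unit test referenced in the commit message ("add ut case") is among the files the diff viewer did not render. Purely as an illustration of how this exporter can be exercised — the import path, fixture contents, and assertions below are hypothetical, not the commit's actual test — a minimal unittest case would drive convert_to_docker_compose end to end:

    import os
    import tempfile
    import unittest

    import yaml

    from exporter import convert_to_docker_compose  # hypothetical import path


    class TestConvertToDockerCompose(unittest.TestCase):
        def test_minimal_mega_yaml(self):
            # Smallest input the converter accepts: one micro service plus common env vars.
            mega = {
                "environment_variables": {"common": {"no_proxy": ""}},
                "opea_micro_services": {
                    "llm": {"opea/llm-tgi": {"tag": "latest", "environment": {}}},
                },
            }
            with tempfile.TemporaryDirectory() as tmp:
                mega_path = os.path.join(tmp, "mega.yaml")
                out_path = os.path.join(tmp, "docker-compose.yaml")
                with open(mega_path, "w") as f:
                    yaml.dump(mega, f)
                convert_to_docker_compose(mega_path, out_path, device="cpu")
                with open(out_path) as f:
                    compose = yaml.safe_load(f)
            # The slash in the image name is flattened into the service key.
            self.assertIn("opea-llm-tgi", compose["services"])
            # The llm micro service is published on its fixed port.
            self.assertEqual(compose["services"]["opea-llm-tgi"]["ports"], ["9000:9000"])


    if __name__ == "__main__":
        unittest.main()

The assertIn/assertEqual calls here are the identifiers the ignore-words file at the top of this commit presumably exists to whitelist.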