From ec3b2e841f23d1ee5dc4d89a57d34e51cf5a5909 Mon Sep 17 00:00:00 2001 From: XinyaoWa Date: Fri, 12 Jul 2024 11:31:14 +0800 Subject: [PATCH] Add vLLM on Ray microservice (#285) Signed-off-by: Xinyao Wang --- .../llms/text-generation/ray_serve/README.md | 10 +- comps/llms/text-generation/vllm-ray/README.md | 81 ++++++++ .../llms/text-generation/vllm-ray/__init__.py | 2 + .../vllm-ray/build_docker_microservice.sh | 9 + .../vllm-ray/build_docker_vllmray.sh | 12 ++ .../vllm-ray/docker/Dockerfile.microservice | 37 ++++ .../vllm-ray/docker/Dockerfile.vllmray | 31 +++ .../vllm-ray/launch_microservice.sh | 13 ++ .../vllm-ray/launch_vllmray.sh | 43 +++++ comps/llms/text-generation/vllm-ray/llm.py | 83 ++++++++ comps/llms/text-generation/vllm-ray/query.sh | 15 ++ .../text-generation/vllm-ray/requirements.txt | 17 ++ .../vllm-ray/vllm_ray_openai.py | 180 ++++++++++++++++++ tests/test_llms_text-generation_vllm-ray.sh | 94 +++++++++ 14 files changed, 620 insertions(+), 7 deletions(-) create mode 100644 comps/llms/text-generation/vllm-ray/README.md create mode 100644 comps/llms/text-generation/vllm-ray/__init__.py create mode 100644 comps/llms/text-generation/vllm-ray/build_docker_microservice.sh create mode 100755 comps/llms/text-generation/vllm-ray/build_docker_vllmray.sh create mode 100644 comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice create mode 100644 comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray create mode 100644 comps/llms/text-generation/vllm-ray/launch_microservice.sh create mode 100755 comps/llms/text-generation/vllm-ray/launch_vllmray.sh create mode 100644 comps/llms/text-generation/vllm-ray/llm.py create mode 100644 comps/llms/text-generation/vllm-ray/query.sh create mode 100644 comps/llms/text-generation/vllm-ray/requirements.txt create mode 100644 comps/llms/text-generation/vllm-ray/vllm_ray_openai.py create mode 100644 tests/test_llms_text-generation_vllm-ray.sh diff --git a/comps/llms/text-generation/ray_serve/README.md b/comps/llms/text-generation/ray_serve/README.md index 6549ad960..0a1fbc23f 100644 --- a/comps/llms/text-generation/ray_serve/README.md +++ b/comps/llms/text-generation/ray_serve/README.md @@ -21,13 +21,9 @@ export HF_TOKEN= And then you can make requests with the OpenAI-compatible APIs like below to check the service status: ```bash -curl http://127.0.0.1:8008/v1/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": , - "messages": [{"role": "user", "content": "What is deep learning?"}], - "max_tokens": 32, - }' +curl http://172.17.0.1:8008/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{"model": , "messages": [{"role": "user", "content": "How are you?"}], "max_tokens": 32 }' ``` For more information about the OpenAI APIs, you can checkeck the [OpenAI official document](https://platform.openai.com/docs/api-reference/). diff --git a/comps/llms/text-generation/vllm-ray/README.md b/comps/llms/text-generation/vllm-ray/README.md new file mode 100644 index 000000000..7521f814b --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/README.md @@ -0,0 +1,81 @@ +# VLLM-Ray Endpoint Service + +[Ray](https://docs.ray.io/en/latest/serve/index.html) is an LLM serving solution that makes it easy to deploy and manage a variety of open source LLMs, built on [Ray Serve](https://docs.ray.io/en/latest/serve/index.html), has native support for autoscaling and multi-node deployments, which is easy to use for LLM inference serving on Intel Gaudi2 accelerators. 
The Intel Gaudi2 accelerator supports both training and inference for deep learning models, in particular LLMs. Please visit [Habana AI products](https://habana.ai/products) for more details.
+
+[vLLM](https://github.com/vllm-project/vllm) is a fast and easy-to-use library for LLM inference and serving. It delivers state-of-the-art serving throughput with advanced features such as PagedAttention and continuous batching. Besides GPUs, vLLM already supports [Intel CPUs](https://www.intel.com/content/www/us/en/products/overview.html) and [Gaudi accelerators](https://habana.ai/products).
+
+This guide provides an example of how to launch vLLM with a Ray Serve endpoint on Gaudi accelerators.
+
+## Set up environment
+
+```bash
+export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
+export vLLM_RAY_ENDPOINT="http://${your_ip}:8006"
+export LLM_MODEL=${your_hf_llm_model}
+```
+
+For gated models such as `LLAMA-2`, you will have to set the `HUGGINGFACEHUB_API_TOKEN` environment variable. Please follow this link [huggingface token](https://huggingface.co/docs/hub/security-tokens) to get an access token and export `HUGGINGFACEHUB_API_TOKEN` with it.
+
+## Set up VLLM Ray Gaudi Service
+
+### Build docker
+
+```bash
+bash ./build_docker_vllmray.sh
+```
+
+### Launch the service
+
+```bash
+bash ./launch_vllmray.sh
+```
+
+The `launch_vllmray.sh` script accepts four parameters:
+
+- port_number: The port number assigned to the Ray Gaudi endpoint, with the default being 8006.
+- model_name: The model name utilized for LLM, with the default set to meta-llama/Llama-2-7b-chat-hf.
+- parallel_number: The number of HPUs per worker process, with the default set to 2.
+- enforce_eager: Whether to enforce eager execution, with the default set to False.
+
+If you want to customize the settings, run:
+
+```bash
+bash ./launch_vllmray.sh ${port_number} ${model_name} ${parallel_number} False/True
+```
+
+### Query the service
+
+You can then make requests with the OpenAI-compatible APIs as below to check the service status:
+
+```bash
+curl http://${your_ip}:8006/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"model": "'"${LLM_MODEL}"'", "messages": [{"role": "user", "content": "How are you?"}]}'
+```
+
+For more information about the OpenAI APIs, you can check the [OpenAI official document](https://platform.openai.com/docs/api-reference/).
+
+## Set up OPEA microservice
+
+Then we wrap the vLLM Ray service into an OPEA microservice.
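+
+The microservice (implemented in `llm.py` in this directory) forwards incoming requests to the vLLM Ray endpoint through an OpenAI-compatible client. Below is a minimal sketch of the idea, assuming the environment variables from the "Set up environment" step are exported:
+
+```python
+# Minimal sketch: send a prompt to the vLLM-Ray OpenAI-compatible endpoint,
+# mirroring what llm.py does for each microservice request.
+import os
+
+from langchain_openai import ChatOpenAI
+
+llm = ChatOpenAI(
+    openai_api_base=os.getenv("vLLM_RAY_ENDPOINT", "http://localhost:8006") + "/v1",
+    model_name=os.getenv("LLM_MODEL", "meta-llama/Llama-2-7b-chat-hf"),
+    openai_api_key="not_needed",  # a real key is not required by the local endpoint
+    max_tokens=32,
+)
+print(llm.invoke("What is Deep Learning?").content)
+```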
+ +### Build docker + +```bash +bash ./build_docker_microservice.sh +``` + +### Launch the microservice + +```bash +bash ./launch_microservice.sh +``` + +### Query the microservice + +```bash +curl http://${your_ip}:9000/v1/chat/completions \ + -X POST \ + -d '{"query":"What is Deep Learning?","max_new_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":false}' \ + -H 'Content-Type: application/json' +``` diff --git a/comps/llms/text-generation/vllm-ray/__init__.py b/comps/llms/text-generation/vllm-ray/__init__.py new file mode 100644 index 000000000..916f3a44b --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/comps/llms/text-generation/vllm-ray/build_docker_microservice.sh b/comps/llms/text-generation/vllm-ray/build_docker_microservice.sh new file mode 100644 index 000000000..fd708dd78 --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/build_docker_microservice.sh @@ -0,0 +1,9 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +cd ../../../../ +docker build \ + -t opea/llm-vllm-ray:latest \ + --build-arg https_proxy=$https_proxy \ + --build-arg http_proxy=$http_proxy \ + -f comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice . diff --git a/comps/llms/text-generation/vllm-ray/build_docker_vllmray.sh b/comps/llms/text-generation/vllm-ray/build_docker_vllmray.sh new file mode 100755 index 000000000..33d35fe50 --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/build_docker_vllmray.sh @@ -0,0 +1,12 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +cd docker + +docker build \ + -f Dockerfile.vllmray ../../ \ + -t vllm_ray:habana \ + --network=host \ + --build-arg http_proxy=${http_proxy} \ + --build-arg https_proxy=${https_proxy} \ + --build-arg no_proxy=${no_proxy} diff --git a/comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice b/comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice new file mode 100644 index 000000000..10d6500a1 --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice @@ -0,0 +1,37 @@ +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
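+
+# OPEA LLM microservice wrapper image: builds on the public langchain/langchain
+# base image, installs the requirements for comps/llms/text-generation/vllm-ray,
+# and runs llm.py as a non-root user.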
+ +FROM langchain/langchain:latest + +RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \ + libgl1-mesa-glx \ + libjemalloc-dev \ + vim + +RUN useradd -m -s /bin/bash user && \ + mkdir -p /home/user && \ + chown -R user /home/user/ + +USER user + +COPY comps /home/user/comps + +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r /home/user/comps/llms/text-generation/vllm-ray/requirements.txt + +ENV PYTHONPATH=$PYTHONPATH:/home/user + +WORKDIR /home/user/comps/llms/text-generation/vllm-ray + +ENTRYPOINT ["python", "llm.py"] \ No newline at end of file diff --git a/comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray b/comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray new file mode 100644 index 000000000..a08e65592 --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray @@ -0,0 +1,31 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# FROM vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest +FROM vault.habana.ai/gaudi-docker/1.16.0/ubuntu22.04/habanalabs/pytorch-installer-2.2.2:latest + +ENV LANG=en_US.UTF-8 + +WORKDIR /root/vllm-ray + +# copy the source code to the package directory +COPY ../../vllm-ray/ /root/vllm-ray + +RUN pip install --upgrade-strategy eager optimum[habana] && \ + pip install git+https://github.com/HabanaAI/DeepSpeed.git@1.15.1 +# RUN pip install -v git+https://github.com/HabanaAI/vllm-fork.git@ae3d6121 +RUN pip install -v git+https://github.com/HabanaAI/vllm-fork.git@cf6952d +RUN pip install "ray>=2.10" "ray[serve,tune]>=2.10" + +RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config && \ + service ssh restart + +ENV no_proxy=localhost,127.0.0.1 +ENV PYTHONPATH=$PYTHONPATH:/root:/root/vllm-ray + +# Required by DeepSpeed +ENV RAY_EXPERIMENTAL_NOSET_HABANA_VISIBLE_MODULES=1 + +ENV PT_HPU_LAZY_ACC_PAR_MODE=0 + +ENV PT_HPU_ENABLE_LAZY_COLLECTIVES=true \ No newline at end of file diff --git a/comps/llms/text-generation/vllm-ray/launch_microservice.sh b/comps/llms/text-generation/vllm-ray/launch_microservice.sh new file mode 100644 index 000000000..628102032 --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/launch_microservice.sh @@ -0,0 +1,13 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +docker run -d --rm \ + --name="llm-vllm-ray-server" \ + -p 9000:9000 \ + --ipc=host \ + -e http_proxy=$http_proxy \ + -e https_proxy=$https_proxy \ + -e vLLM_RAY_ENDPOINT=$vLLM_RAY_ENDPOINT \ + -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN \ + -e LLM_MODEL=$LLM_MODEL \ + opea/llm-vllm-ray:latest diff --git a/comps/llms/text-generation/vllm-ray/launch_vllmray.sh b/comps/llms/text-generation/vllm-ray/launch_vllmray.sh new file mode 100755 index 000000000..fcff33265 --- /dev/null +++ b/comps/llms/text-generation/vllm-ray/launch_vllmray.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Set default values +default_port=8006 +default_model=$LLM_MODEL +default_parallel_number=2 +default_enforce_eager=False + +# Assign arguments to variables +port_number=${1:-$default_port} +model_name=${2:-$default_model} +parallel_number=${3:-$default_parallel_number} +enforce_eager=${4:-$default_enforce_eager} + +# Check if all required arguments are provided +if [ "$#" -lt 0 ] || [ "$#" -gt 3 ]; then + echo "Usage: $0 [port_number] [model_name] 
[parallel_number] [enforce_eager]"
+    echo "Please customize the arguments you want to use.
+    - port_number: The port number assigned to the Ray Gaudi endpoint, with the default being 8006.
+    - model_name: The model name utilized for LLM, with the default set to meta-llama/Llama-2-7b-chat-hf.
+    - parallel_number: The number of HPUs per worker process, with the default set to 2.
+    - enforce_eager: Whether to enforce eager execution, with the default set to False."
+    exit 1
+fi
+
+# Launch the vLLM on Ray service container
+docker run -d --rm \
+    --name="vllm-ray-service" \
+    --runtime=habana \
+    -v $PWD/data:/data \
+    -e HABANA_VISIBLE_DEVICES=all \
+    -e OMPI_MCA_btl_vader_single_copy_mechanism=none \
+    --cap-add=sys_nice \
+    --ipc=host \
+    -p $port_number:8000 \
+    -e HTTPS_PROXY=$https_proxy \
+    -e HTTP_PROXY=$http_proxy \
+    -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN \
+    vllm_ray:habana \
+    /bin/bash -c "ray start --head && python vllm_ray_openai.py --port_number 8000 --model_id_or_path $model_name --tensor_parallel_size $parallel_number --enforce_eager $enforce_eager"
diff --git a/comps/llms/text-generation/vllm-ray/llm.py b/comps/llms/text-generation/vllm-ray/llm.py
new file mode 100644
index 000000000..dc0c4b669
--- /dev/null
+++ b/comps/llms/text-generation/vllm-ray/llm.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from fastapi.responses import StreamingResponse
+from langchain_openai import ChatOpenAI
+from langsmith import traceable
+
+from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice
+
+
+@traceable(run_type="tool")
+def post_process_text(text: str):
+    if text == " ":
+        return "data: @#$\n\n"
+    if text == "\n":
+        return "data: <br/>\n\n"
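+    # Note: this helper wraps each streamed chunk as an SSE "data:" line; spaces
+    # are encoded as "@#$" and newlines as "<br/>", presumably so they survive
+    # SSE framing and can be decoded again by the consumer.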
+    if text.isspace():
+        return None
+    new_text = text.replace(" ", "@#$")
+    return f"data: {new_text}\n\n"
+
+
+@register_microservice(
+    name="opea_service@llm_vllm_ray",
+    service_type=ServiceType.LLM,
+    endpoint="/v1/chat/completions",
+    host="0.0.0.0",
+    port=9000,
+)
+@traceable(run_type="llm")
+def llm_generate(input: LLMParamsDoc):
+    llm_endpoint = os.getenv("vLLM_RAY_ENDPOINT", "http://localhost:8006")
+    llm_model = os.getenv("LLM_MODEL", "meta-llama/Llama-2-7b-chat-hf")
+    llm = ChatOpenAI(
+        openai_api_base=llm_endpoint + "/v1",
+        model_name=llm_model,
+        openai_api_key=os.getenv("OPENAI_API_KEY", "not_needed"),
+        max_tokens=input.max_new_tokens,
+        temperature=input.temperature,
+        streaming=input.streaming,
+        request_timeout=600,
+    )
+
+    if input.streaming:
+
+        async def stream_generator():
+            chat_response = ""
+            async for text in llm.astream(input.query):
+                text = text.content
+                chat_response += text
+                processed_text = post_process_text(text)
+                if text and processed_text:
+                    if "</s>" in text:
+                        res = text.split("</s>")[0]
+                        if res != "":
+                            yield res
+                        break
+                    yield processed_text
+            print(f"[llm - chat_stream] stream response: {chat_response}")
+            yield "data: [DONE]\n\n"
+
+        return StreamingResponse(stream_generator(), media_type="text/event-stream")
+    else:
+        response = llm.invoke(input.query)
+        response = response.content
+        return GeneratedDoc(text=response, prompt=input.query)
+
+
+if __name__ == "__main__":
+    opea_microservices["opea_service@llm_vllm_ray"].start()
diff --git a/comps/llms/text-generation/vllm-ray/query.sh b/comps/llms/text-generation/vllm-ray/query.sh
new file mode 100644
index 000000000..3555751d1
--- /dev/null
+++ b/comps/llms/text-generation/vllm-ray/query.sh
@@ -0,0 +1,15 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+your_ip="0.0.0.0"
+
+##query vllm ray service
+curl http://${your_ip}:8006/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"model": "meta-llama/Llama-2-7b-chat-hf", "messages": [{"role": "user", "content": "How are you?"}]}'
+
+##query microservice
+curl http://${your_ip}:9000/v1/chat/completions \
+  -X POST \
+  -d '{"query":"What is Deep Learning?","max_new_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":false}' \
+  -H 'Content-Type: application/json'
diff --git a/comps/llms/text-generation/vllm-ray/requirements.txt b/comps/llms/text-generation/vllm-ray/requirements.txt
new file mode 100644
index 000000000..1c511793d
--- /dev/null
+++ b/comps/llms/text-generation/vllm-ray/requirements.txt
@@ -0,0 +1,17 @@
+docarray[full]
+fastapi
+huggingface_hub
+langchain==0.1.16
+langchain_openai
+langserve
+langsmith
+openai
+opentelemetry-api
+opentelemetry-exporter-otlp
+opentelemetry-sdk
+prometheus-fastapi-instrumentator
+ray[serve]>=2.10
+setuptools==69.5.1
+shortuuid
+transformers
+vllm
diff --git a/comps/llms/text-generation/vllm-ray/vllm_ray_openai.py b/comps/llms/text-generation/vllm-ray/vllm_ray_openai.py
new file mode 100644
index 000000000..8e0f2f12a
--- /dev/null
+++ b/comps/llms/text-generation/vllm-ray/vllm_ray_openai.py
@@ -0,0 +1,180 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import logging
+import os
+import sys
+
+# __serve_example_begin__
+from typing import Dict, List, Optional
+
+import torch
+from fastapi import FastAPI
+from huggingface_hub import login
+from ray import serve
+from starlette.requests import Request
+from starlette.responses import 
JSONResponse, StreamingResponse +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.engine.async_llm_engine import AsyncLLMEngine +from vllm.entrypoints.openai.cli_args import make_arg_parser +from vllm.entrypoints.openai.protocol import ChatCompletionRequest, ChatCompletionResponse, ErrorResponse +from vllm.entrypoints.openai.serving_chat import OpenAIServingChat +from vllm.entrypoints.openai.serving_engine import LoRAModulePath + +hg_token = os.getenv("HUGGINGFACEHUB_API_TOKEN", "") +if hg_token != "": + login(token=hg_token) + +logger = logging.getLogger("ray.serve") + +app = FastAPI() + + +@serve.deployment( + autoscaling_config={ + "min_replicas": 1, + "max_replicas": 10, + "target_ongoing_requests": 5, + }, + max_ongoing_requests=10, +) +@serve.ingress(app) +class VLLMDeployment: + def __init__( + self, + engine_args: AsyncEngineArgs, + response_role: str, + lora_modules: Optional[List[LoRAModulePath]] = None, + chat_template: Optional[str] = None, + ): + logger.info(f"Starting with engine args: {engine_args}") + self.engine = AsyncLLMEngine.from_engine_args(engine_args) + + # Determine the name of the served model for the OpenAI client. + if engine_args.served_model_name is not None: + served_model_names = engine_args.served_model_name + else: + served_model_names = [engine_args.model] + self.openai_serving_chat = OpenAIServingChat( + self.engine, served_model_names, response_role, lora_modules, chat_template + ) + + @app.post("/v1/chat/completions") + async def create_chat_completion(self, request: ChatCompletionRequest, raw_request: Request): + """OpenAI-compatible HTTP endpoint. + + API reference: + - https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html + """ + logger.info(f"Request: {request}") + generator = await self.openai_serving_chat.create_chat_completion(request, raw_request) + if isinstance(generator, ErrorResponse): + return JSONResponse(content=generator.model_dump(), status_code=generator.code) + if request.stream: + return StreamingResponse(content=generator, media_type="text/event-stream") + else: + assert isinstance(generator, ChatCompletionResponse) + return JSONResponse(content=generator.model_dump()) + + +def parse_vllm_args(cli_args: Dict[str, str]): + """Parses vLLM args based on CLI inputs. + + Currently uses argparse because vLLM doesn't expose Python models for all of the + config options we want to support. + """ + parser = make_arg_parser() + arg_strings = [] + for key, value in cli_args.items(): + arg_strings.extend([f"--{key}", str(value)]) + logger.info(arg_strings) + parsed_args = parser.parse_args(args=arg_strings) + return parsed_args + + +def build_app(cli_args: Dict[str, str]) -> serve.Application: + """Builds the Serve app based on CLI arguments. + + See https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html#command-line-arguments-for-the-server + for the complete set of arguments. + + Supported engine arguments: https://docs.vllm.ai/en/latest/models/engine_args.html. 
+ """ # noqa: E501 + device = cli_args.pop("device") + enforce_eager = cli_args.pop("enforce_eager") + parsed_args = parse_vllm_args(cli_args) + engine_args = AsyncEngineArgs.from_cli_args(parsed_args) + engine_args.worker_use_ray = True + engine_args.enforce_eager = enforce_eager + engine_args.block_size = 128 + engine_args.max_num_seqs = 256 + engine_args.max_seq_len_to_capture = 2048 + + tp = engine_args.tensor_parallel_size + logger.info(f"Tensor parallelism = {tp}") + pg_resources = [] + pg_resources.append({"CPU": 1}) # for the deployment replica + for i in range(tp): + pg_resources.append({"CPU": 1, device: 1}) # for the vLLM actors + + # We use the "STRICT_PACK" strategy below to ensure all vLLM actors are placed on + # the same Ray node. + return VLLMDeployment.options(placement_group_bundles=pg_resources, placement_group_strategy="STRICT_PACK").bind( + engine_args, + parsed_args.response_role, + parsed_args.lora_modules, + parsed_args.chat_template, + ) + + +# __serve_example_end__ + + +def str2bool(v): + if isinstance(v, bool): + return v + if v.lower() in ("yes", "true", "t", "y", "1"): + return True + elif v.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise argparse.ArgumentTypeError("Boolean value expected.") + + +def main(argv=None): + parser = argparse.ArgumentParser(description="Serve vLLM models with Ray.", add_help=True) + parser.add_argument("--port_number", default="8000", type=str, help="Port number to serve on.", required=False) + parser.add_argument( + "--model_id_or_path", + default="meta-llama/Llama-2-7b-chat-hf", + type=str, + help="Model id or path.", + required=False, + ) + parser.add_argument( + "--tensor_parallel_size", default=2, type=int, help="parallel nodes number for 'hpu' mode.", required=False + ) + parser.add_argument( + "--enforce_eager", default=False, type=str2bool, help="Whether to enforce eager execution", required=False + ) + args = parser.parse_args(argv) + + serve.start(http_options={"host": "0.0.0.0", "port": args.port_number}) + serve.run( + build_app( + { + "model": args.model_id_or_path, + "tensor-parallel-size": args.tensor_parallel_size, + "device": "HPU", + "enforce_eager": args.enforce_eager, + } + ) + ) + # input("Service is deployed successfully.") + while 1: + pass + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/tests/test_llms_text-generation_vllm-ray.sh b/tests/test_llms_text-generation_vllm-ray.sh new file mode 100644 index 000000000..cae551bf5 --- /dev/null +++ b/tests/test_llms_text-generation_vllm-ray.sh @@ -0,0 +1,94 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -xe + +WORKPATH=$(dirname "$PWD") +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + ## Build VLLM Ray docker + cd $WORKPATH/comps/llms/text-generation/vllm-ray/docker + docker build \ + -f Dockerfile.vllmray \ + -t vllm_ray:habana --network=host ../../ + + ## Build OPEA microservice docker + cd $WORKPATH + docker build \ + -t opea/llm-vllm-ray:comps \ + -f comps/llms/text-generation/vllm-ray/docker/Dockerfile.microservice . 
+} + +function start_service() { + export LLM_MODEL="facebook/opt-125m" + port_number=8006 + docker run -d --rm \ + --name="test-comps-vllm-ray-service" \ + --runtime=habana \ + -v $PWD/data:/data \ + -e HABANA_VISIBLE_DEVICES=all \ + -e OMPI_MCA_btl_vader_single_copy_mechanism=none \ + --cap-add=sys_nice \ + --ipc=host \ + -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN \ + -p $port_number:8000 \ + vllm_ray:habana \ + /bin/bash -c "ray start --head && python vllm_ray_openai.py --port_number 8000 --model_id_or_path $LLM_MODEL --tensor_parallel_size 2 --enforce_eager False" + + export vLLM_RAY_ENDPOINT="http://${ip_address}:${port_number}" + docker run -d --rm\ + --name="test-comps-vllm-ray-microservice" \ + -p 9000:9000 \ + --ipc=host \ + -e vLLM_RAY_ENDPOINT=$vLLM_RAY_ENDPOINT \ + -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN \ + -e LLM_MODEL=$LLM_MODEL \ + opea/llm-vllm-ray:comps + + # check whether vllm ray is fully ready + n=0 + until [[ "$n" -ge 100 ]] || [[ $ready == true ]]; do + docker logs test-comps-vllm-ray-service > ${WORKPATH}/tests/test-comps-vllm-ray-service.log + n=$((n+1)) + if grep -q Connected ${WORKPATH}/tests/test-comps-vllm-ray-service.log; then + break + fi + sleep 5s + done + sleep 5s +} + +function validate_microservice() { + http_proxy="" curl http://${ip_address}:8006/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{"model": "facebook/opt-125m", "messages": [{"role": "user", "content": "How are you?"}]}' + http_proxy="" curl http://${ip_address}:9000/v1/chat/completions \ + -X POST \ + -d '{"query":"What is Deep Learning?","max_new_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":false}' \ + -H 'Content-Type: application/json' + docker logs test-comps-vllm-ray-service + docker logs test-comps-vllm-ray-microservice +} + +function stop_docker() { + cid=$(docker ps -aq --filter "name=test-comps-vllm-ray*") + if [[ ! -z "$cid" ]]; then docker rm $cid -f && sleep 1s; fi +} + +function main() { + + stop_docker + + build_docker_images + start_service + + validate_microservice + + stop_docker + # echo y | docker system prune + +} + +main
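+
+# Usage note: run this script from the repository's tests/ directory on a Gaudi
+# host, e.g. `cd tests && bash test_llms_text-generation_vllm-ray.sh`, with
+# HUGGINGFACEHUB_API_TOKEN exported if a gated model is used.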