From 7e62175c2e3aac484576649c1219cc5a2a4bd2e3 Mon Sep 17 00:00:00 2001
From: chyundunovDatamonsters
Date: Mon, 18 Nov 2024 13:58:38 +0700
Subject: [PATCH] Adding files to deploy CodeTrans application on AMD GPU (#1138)

Signed-off-by: Chingis Yundunov
---
 .../docker_compose/amd/gpu/rocm/README.md     | 121 ++++++++++++
 .../docker_compose/amd/gpu/rocm/compose.yaml  |  97 ++++++++++
 .../docker_compose/amd/gpu/rocm/set_env.sh    |  49 +++++
 CodeTrans/tests/test_compose_on_rocm.sh       | 180 ++++++++++++++++++
 4 files changed, 447 insertions(+)
 create mode 100644 CodeTrans/docker_compose/amd/gpu/rocm/README.md
 create mode 100644 CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml
 create mode 100644 CodeTrans/docker_compose/amd/gpu/rocm/set_env.sh
 create mode 100644 CodeTrans/tests/test_compose_on_rocm.sh

diff --git a/CodeTrans/docker_compose/amd/gpu/rocm/README.md b/CodeTrans/docker_compose/amd/gpu/rocm/README.md
new file mode 100644
index 000000000..fafe837b4
--- /dev/null
+++ b/CodeTrans/docker_compose/amd/gpu/rocm/README.md
@@ -0,0 +1,121 @@
+# Build and deploy CodeTrans Application on AMD GPU (ROCm)
+
+## Build images
+
+### Build the LLM Docker Image
+
+```bash
+### Cloning repo
+git clone https://github.com/opea-project/GenAIComps.git
+cd GenAIComps
+
+### Build Docker image
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+```
+
+### Build the MegaService Docker Image
+
+```bash
+### Cloning repo
+git clone https://github.com/opea-project/GenAIExamples
+cd GenAIExamples/CodeTrans
+
+### Build Docker image
+docker build -t opea/codetrans:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### Build the UI Docker Image
+
+```bash
+cd GenAIExamples/CodeTrans/ui
+### Build UI Docker image
+docker build -t opea/codetrans-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
+```
+
+## Deploy CodeTrans Application
+
+### Features of Docker Compose for AMD GPUs
+
+1. GPU devices are passed through to the TGI service container with the following settings:
+
+```yaml
+shm_size: 1g
+devices:
+  - /dev/kfd:/dev/kfd
+  - /dev/dri/:/dev/dri/
+cap_add:
+  - SYS_PTRACE
+group_add:
+  - video
+security_opt:
+  - seccomp:unconfined
+```
+
+In this case, all GPUs are passed through to the container. To expose only a specific GPU, use its exact device names cardN and renderDN.
+
+For example:
+
+```yaml
+shm_size: 1g
+devices:
+  - /dev/kfd:/dev/kfd
+  - /dev/dri/card0:/dev/dri/card0
+  - /dev/dri/renderD128:/dev/dri/renderD128
+cap_add:
+  - SYS_PTRACE
+group_add:
+  - video
+security_opt:
+  - seccomp:unconfined
+```
+
+To find out which cardN and renderDN device IDs correspond to the same GPU, use the AMD GPU driver utilities (for example, `rocm-smi` or `ls -l /dev/dri/by-path`).
+
+### Go to the directory with the Docker compose file
+
+```bash
+cd GenAIExamples/CodeTrans/docker_compose/amd/gpu/rocm
+```
+
+### Set environment variables
+
+Set the required values in the file `GenAIExamples/CodeTrans/docker_compose/amd/gpu/rocm/set_env.sh`. The purpose of each variable is described in the comment above the corresponding export command.
+
+```bash
+chmod +x set_env.sh
+. set_env.sh
+```
+
+### Run services
+
+```bash
+docker compose up -d
+```
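+
+The first startup can take a while because the TGI service downloads and loads the model before it starts serving requests. A simple way to wait for readiness (this mirrors the check used in the CodeTrans ROCm CI test and assumes the default container name `codetrans-tgi-service` from the compose file) is to poll the TGI log:
+
+```bash
+# Poll the TGI container log until it reports a successful start
+until docker logs codetrans-tgi-service 2>&1 | grep -q Connected; do
+  sleep 5s
+done
+```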
+
+# Validate the MicroServices and MegaService
+
+## Validate TGI service
+
+```bash
+curl http://${HOST_IP}:${CODETRANS_TGI_SERVICE_PORT}/generate \
+  -X POST \
+  -d '{"inputs":" ### System: Please translate the following Golang codes into Python codes. ### Original codes: '\'''\'''\''Golang \npackage main\n\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"Hello, World!\");\n '\'''\'''\'' ### Translated codes:","parameters":{"max_new_tokens":17, "do_sample": true}}' \
+  -H 'Content-Type: application/json'
+```
+
+## Validate LLM service
+
+```bash
+curl http://${HOST_IP}:${CODETRANS_LLM_SERVICE_PORT}/v1/chat/completions \
+  -X POST \
+  -d '{"query":" ### System: Please translate the following Golang codes into Python codes. ### Original codes: '\'''\'''\''Golang \npackage main\n\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"Hello, World!\");\n '\'''\'''\'' ### Translated codes:"}' \
+  -H 'Content-Type: application/json'
+```
+
+## Validate MegaService
+
+```bash
+curl http://${HOST_IP}:${CODETRANS_BACKEND_SERVICE_PORT}/v1/codetrans \
+  -H "Content-Type: application/json" \
+  -d '{"language_from": "Golang","language_to": "Python","source_code": "package main\n\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"Hello, World!\");\n}"}'
+```
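+
+## Validate the Nginx service
+
+The Nginx reverse proxy defined in the compose file forwards API requests to the backend. A quick check (mirroring the CI test in this patch) is to send the same translation request through the proxy port set in `CODETRANS_NGINX_PORT`:
+
+```bash
+curl http://${HOST_IP}:${CODETRANS_NGINX_PORT}/v1/codetrans \
+  -H "Content-Type: application/json" \
+  -d '{"language_from": "Golang","language_to": "Python","source_code": "package main\n\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"Hello, World!\");\n}"}'
+```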
diff --git a/CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml b/CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml
new file mode 100644
index 000000000..cfad48a4d
--- /dev/null
+++ b/CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml
@@ -0,0 +1,97 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  codetrans-tgi-service:
+    image: ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
+    container_name: codetrans-tgi-service
+    ports:
+      - "${CODETRANS_TGI_SERVICE_PORT:-8008}:80"
+    volumes:
+      - "/var/lib/GenAI/codetrans/data:/data"
+    shm_size: 1g
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      TGI_LLM_ENDPOINT: ${CODETRANS_TGI_LLM_ENDPOINT}
+      HUGGING_FACE_HUB_TOKEN: ${CODETRANS_HUGGINGFACEHUB_API_TOKEN}
+      HUGGINGFACEHUB_API_TOKEN: ${CODETRANS_HUGGINGFACEHUB_API_TOKEN}
+    devices:
+      - /dev/kfd:/dev/kfd
+      - /dev/dri/:/dev/dri/
+    cap_add:
+      - SYS_PTRACE
+    group_add:
+      - video
+    security_opt:
+      - seccomp:unconfined
+    ipc: host
+    command: --model-id ${CODETRANS_LLM_MODEL_ID}
+  codetrans-llm-server:
+    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+    container_name: codetrans-llm-server
+    ports:
+      - "${CODETRANS_LLM_SERVICE_PORT:-9000}:9000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      TGI_LLM_ENDPOINT: "http://codetrans-tgi-service"
+      HUGGINGFACEHUB_API_TOKEN: ${CODETRANS_HUGGINGFACEHUB_API_TOKEN}
+    restart: unless-stopped
+  codetrans-backend-server:
+    image: ${REGISTRY:-opea}/codetrans:${TAG:-latest}
+    container_name: codetrans-backend-server
+    depends_on:
+      - codetrans-tgi-service
+      - codetrans-llm-server
+    ports:
+      - "${CODETRANS_BACKEND_SERVICE_PORT:-7777}:7777"
+    environment:
+      no_proxy: ${no_proxy}
+      https_proxy: ${https_proxy}
+      http_proxy: ${http_proxy}
+      MEGA_SERVICE_HOST_IP: ${HOST_IP}
+      LLM_SERVICE_HOST_IP: "codetrans-llm-server"
+    ipc: host
+    restart: always
+  codetrans-ui-server:
+    image: ${REGISTRY:-opea}/codetrans-ui:${TAG:-latest}
+    container_name: codetrans-ui-server
+    depends_on:
+      - codetrans-backend-server
+    ports:
+      - "${CODETRANS_FRONTEND_SERVICE_PORT:-5173}:5173"
+    environment:
+      no_proxy: ${no_proxy}
+      https_proxy: ${https_proxy}
+      http_proxy: ${http_proxy}
+      BASE_URL: ${CODETRANS_BACKEND_SERVICE_URL}
+      BASIC_URL: ${CODETRANS_BACKEND_SERVICE_URL}
+    ipc: host
+    restart: always
+  codetrans-nginx-server:
+    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
+    container_name: codetrans-nginx-server
+    depends_on:
+      - codetrans-backend-server
+      - codetrans-ui-server
+    ports:
+      - "${CODETRANS_NGINX_PORT:-80}:80"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - FRONTEND_SERVICE_IP=${CODETRANS_FRONTEND_SERVICE_IP}
+      - FRONTEND_SERVICE_PORT=${CODETRANS_FRONTEND_SERVICE_PORT}
+      - BACKEND_SERVICE_NAME=${CODETRANS_BACKEND_SERVICE_NAME}
+      - BACKEND_SERVICE_IP=${CODETRANS_BACKEND_SERVICE_IP}
+      - BACKEND_SERVICE_PORT=${CODETRANS_BACKEND_SERVICE_PORT}
+    ipc: host
+    restart: always
+
+networks:
+  default:
+    driver: bridge
diff --git a/CodeTrans/docker_compose/amd/gpu/rocm/set_env.sh b/CodeTrans/docker_compose/amd/gpu/rocm/set_env.sh
new file mode 100644
index 000000000..38cd7e183
--- /dev/null
+++ b/CodeTrans/docker_compose/amd/gpu/rocm/set_env.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+### The IP address or domain name of the server on which the application is running
+export HOST_IP=direct-supercomputer1.powerml.co
+
+### Model ID
+export CODETRANS_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
+
+### The port of the TGI service. On this port, the TGI service will accept connections
+export CODETRANS_TGI_SERVICE_PORT=18156
+
+### The endpoint of the TGI service to which requests to this service will be sent (formed from previously set variables)
+export CODETRANS_TGI_LLM_ENDPOINT="http://${HOST_IP}:${CODETRANS_TGI_SERVICE_PORT}"
+
+### A token for accessing the model repositories (Hugging Face Hub)
+export CODETRANS_HUGGINGFACEHUB_API_TOKEN=''
+
+### The port of the LLM service. On this port, the LLM service will accept connections
+export CODETRANS_LLM_SERVICE_PORT=18157
+
+### The IP address or domain name of the server for the CodeTrans MegaService
+export CODETRANS_MEGA_SERVICE_HOST_IP=${HOST_IP}
+
+### The IP address of the host on which the LLM service is running
+export CODETRANS_LLM_SERVICE_HOST_IP=${HOST_IP}
+
+### The IP address of the host on which the container with the frontend service is running
+export CODETRANS_FRONTEND_SERVICE_IP=192.165.1.21
+
+### The port of the frontend service
+export CODETRANS_FRONTEND_SERVICE_PORT=18155
+
+### Name of the GenAI service used to route requests to the application
+export CODETRANS_BACKEND_SERVICE_NAME=codetrans
+
+### The IP address of the host on which the container with the backend service is running
+export CODETRANS_BACKEND_SERVICE_IP=192.165.1.21
+
+### The port of the backend service
+export CODETRANS_BACKEND_SERVICE_PORT=18154
+
+### The port of the Nginx reverse proxy for the application
+export CODETRANS_NGINX_PORT=18153
+
+### Endpoint of the backend service
+export CODETRANS_BACKEND_SERVICE_URL="http://${HOST_IP}:${CODETRANS_BACKEND_SERVICE_PORT}/v1/codetrans"
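+
+### The settings above can be spot-checked after sourcing this file, for example with
+### `echo ${CODETRANS_TGI_LLM_ENDPOINT}` and `echo ${CODETRANS_BACKEND_SERVICE_URL}`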
"${opea_branch:-"main"}" && cd ../ + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + service_list="codetrans codetrans-ui llm-tgi nginx" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/amd/gpu/rocm/ + export http_proxy=${http_proxy} + export https_proxy=${http_proxy} + export CODETRANS_TGI_SERVICE_PORT=8008 + export CODETRANS_LLM_SERVICE_PORT=9000 + export CODETRANS_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct" + export CODETRANS_TGI_LLM_ENDPOINT="http://${ip_address}:${CODETRANS_TGI_SERVICE_PORT}" + export CODETRANS_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + export CODETRANS_MEGA_SERVICE_HOST_IP=${ip_address} + export CODETRANS_LLM_SERVICE_HOST_IP=${ip_address} + export CODETRANS_FRONTEND_SERVICE_IP=${ip_address} + export CODETRANS_FRONTEND_SERVICE_PORT=5173 + export CODETRANS_BACKEND_SERVICE_NAME=codetrans + export CODETRANS_BACKEND_SERVICE_IP=${ip_address} + export CODETRANS_BACKEND_SERVICE_PORT=7777 + export CODETRANS_NGINX_PORT=8088 + export CODETRANS_BACKEND_SERVICE_URL="http://${ip_address}:${CODETRANS_BACKEND_SERVICE_PORT}/v1/codetrans" + + sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env + + # Start Docker Containers + docker compose up -d > ${LOG_PATH}/start_services_with_compose.log + + n=0 + until [[ "$n" -ge 100 ]]; do + docker logs codetrans-tgi-service > ${LOG_PATH}/tgi_service_start.log + if grep -q Connected ${LOG_PATH}/tgi_service_start.log; then + break + fi + sleep 5s + n=$((n+1)) + done +} + +function validate_services() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + + local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + + if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then + echo "[ $SERVICE_NAME ] Content is as expected." + else + echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + sleep 5s +} + +function validate_microservices() { + # tgi for embedding service + validate_services \ + "${ip_address}:${CODETRANS_TGI_SERVICE_PORT}/generate" \ + "generated_text" \ + "codetrans-tgi-service" \ + "codetrans-tgi-service" \ + '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' + + # llm microservice + validate_services \ + "${ip_address}:${CODETRANS_LLM_SERVICE_PORT}/v1/chat/completions" \ + "data: " \ + "codetrans-llm-server" \ + "codetrans-llm-server" \ + '{"query":" ### System: Please translate the following Golang codes into Python codes. 
+function validate_services() {
+    local URL="$1"
+    local EXPECTED_RESULT="$2"
+    local SERVICE_NAME="$3"
+    local DOCKER_NAME="$4"
+    local INPUT_DATA="$5"
+
+    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
+    if [ "$HTTP_STATUS" -eq 200 ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
+
+        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)
+
+        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
+            echo "[ $SERVICE_NAME ] Content is as expected."
+        else
+            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
+            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+            exit 1
+        fi
+    else
+        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
+        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+        exit 1
+    fi
+    sleep 5s
+}
+
+function validate_microservices() {
+    # tgi service for text generation
+    validate_services \
+        "${ip_address}:${CODETRANS_TGI_SERVICE_PORT}/generate" \
+        "generated_text" \
+        "codetrans-tgi-service" \
+        "codetrans-tgi-service" \
+        '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}'
+
+    # llm microservice
+    validate_services \
+        "${ip_address}:${CODETRANS_LLM_SERVICE_PORT}/v1/chat/completions" \
+        "data: " \
+        "codetrans-llm-server" \
+        "codetrans-llm-server" \
+        '{"query":" ### System: Please translate the following Golang codes into Python codes. ### Original codes: '\'''\'''\''Golang \npackage main\n\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"Hello, World!\");\n '\'''\'''\'' ### Translated codes:"}'
+
+}
+
+function validate_megaservice() {
+    # Curl the Mega Service
+    validate_services \
+        "${ip_address}:${CODETRANS_BACKEND_SERVICE_PORT}/v1/codetrans" \
+        "print" \
+        "codetrans-backend-server" \
+        "codetrans-backend-server" \
+        '{"language_from": "Golang","language_to": "Python","source_code": "package main\n\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"Hello, World!\");\n}\n"}'
+
+    # test the megaservice via nginx
+    validate_services \
+        "${ip_address}:${CODETRANS_NGINX_PORT}/v1/codetrans" \
+        "print" \
+        "codetrans-nginx-server" \
+        "codetrans-nginx-server" \
+        '{"language_from": "Golang","language_to": "Python","source_code": "package main\n\nimport \"fmt\"\nfunc main() {\n fmt.Println(\"Hello, World!\");\n}\n"}'
+
+}
+
+function validate_frontend() {
+    cd $WORKPATH/ui/svelte
+    local conda_env_name="OPEA_e2e"
+    export PATH=${HOME}/miniconda3/bin/:$PATH
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exists!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
+    source activate ${conda_env_name}
+
+    sed -i "s/localhost/$ip_address/g" playwright.config.ts
+
+    conda install -c conda-forge nodejs -y
+    npm install && npm ci && npx playwright install --with-deps
+    node -v && npm -v && pip list
+
+    exit_status=0
+    npx playwright test || exit_status=$?
+
+    if [ $exit_status -ne 0 ]; then
+        echo "[TEST INFO]: ---------frontend test failed---------"
+        exit $exit_status
+    else
+        echo "[TEST INFO]: ---------frontend test passed---------"
+    fi
+}
+
+function stop_docker() {
+    cd $WORKPATH/docker_compose/amd/gpu/rocm/
+    docker compose stop && docker compose rm -f
+}
+
+function main() {
+
+    stop_docker
+
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    start_services
+
+    validate_microservices
+    validate_megaservice
+    validate_frontend
+
+    stop_docker
+    echo y | docker system prune
+
+}
+
+main