update image build compose (opea-project#698)
Signed-off-by: chensuyue <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
chensuyue and pre-commit-ci[bot] authored Sep 18, 2024
1 parent 1249c4f commit 3d00a33
Showing 14 changed files with 48 additions and 55 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/docker/compose/agent-compose-cd.yaml
@@ -3,7 +3,7 @@

 # this file should be run in the root of the repo
 services:
-  comps-agent-langchain:
+  agent-langchain:
     build:
       dockerfile: comps/agent/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/comps-agent-langchain:${TAG:-latest}
+    image: ${REGISTRY:-opea}/agent-langchain:${TAG:-latest}
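A quick way to sanity-check a rename like this locally is to build the service straight from the compose file. A minimal sketch, assuming Docker Compose v2 and the GenAIComps repo root as the working directory; `--project-directory .` keeps the `comps/...` Dockerfile path resolvable, per the "run in the root of the repo" comment:

```bash
# Sketch: after the rename, the CD build should produce
# opea/agent-langchain:latest rather than opea/comps-agent-langchain:latest.
docker compose -f .github/workflows/docker/compose/agent-compose-cd.yaml \
  --project-directory . build agent-langchain
docker images | grep agent-langchain  # expect opea/agent-langchain:latest
```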
4 changes: 0 additions & 4 deletions .github/workflows/docker/compose/dataprep-compose-cd.yaml
@@ -19,10 +19,6 @@ services:
     build:
       dockerfile: comps/dataprep/pinecone/langchain/Dockerfile
     image: ${REGISTRY:-opea}/dataprep-pinecone:${TAG:-latest}
-  dataprep-multimodal-redis:
-    build:
-      dockerfile: comps/dataprep/multimodal/redis/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/dataprep-multimodal-redis:${TAG:-latest}
   dataprep-vdms:
     build:
       dockerfile: comps/dataprep/vdms/langchain/Dockerfile
4 changes: 4 additions & 0 deletions .github/workflows/docker/compose/dataprep-compose.yaml
@@ -21,3 +21,7 @@ services:
     build:
       dockerfile: comps/dataprep/vdms/multimodal_langchain/Dockerfile
     image: ${REGISTRY:-opea}/dataprep-multimodal-vdms:${TAG:-latest}
+  dataprep-multimodal-redis:
+    build:
+      dockerfile: comps/dataprep/multimodal/redis/langchain/Dockerfile
+    image: ${REGISTRY:-opea}/dataprep-multimodal-redis:${TAG:-latest}
12 changes: 2 additions & 10 deletions .github/workflows/docker/compose/embeddings-compose-cd.yaml
@@ -14,18 +14,10 @@ services:
     build:
       dockerfile: comps/embeddings/tei/llama_index/Dockerfile
     image: ${REGISTRY:-opea}/embedding-tei-llama-index:${TAG:-latest}
-  bridgetower-embedder:
-    build:
-      dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile
-    image: ${REGISTRY:-opea}/bridgetower-embedder:${TAG:-latest}
-  bridgetower-embedder-gaudi:
+  embedding-multimodal-bridgetower-gaudi:
     build:
       dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile.intel_hpu
-    image: ${REGISTRY:-opea}/bridgetower-embedder-gaudi:${TAG:-latest}
-  embedding-multimodal:
-    build:
-      dockerfile: comps/embeddings/multimodal/multimodal_langchain/Dockerfile
-    image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower-gaudi:${TAG:-latest}
   embedding-predictionguard:
     build:
       dockerfile: comps/embeddings/predictionguard/Dockerfile
8 changes: 8 additions & 0 deletions .github/workflows/docker/compose/embeddings-compose.yaml
@@ -12,3 +12,11 @@ services:
     build:
       dockerfile: comps/embeddings/multimodal_clip/Dockerfile
     image: ${REGISTRY:-opea}/embedding-multimodal-clip:${TAG:-latest}
+  embedding-multimodal-bridgetower:
+    build:
+      dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile
+    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower:${TAG:-latest}
+  embedding-multimodal:
+    build:
+      dockerfile: comps/embeddings/multimodal/multimodal_langchain/Dockerfile
+    image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest}
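The `${REGISTRY:-opea}` and `${TAG:-latest}` expansions mean the relocated images can be retargeted at build time without editing the file. A hedged example; the registry host and tag below are placeholders, not values from this commit:

```bash
# Hypothetical override: build the new multimodal embedding images for a
# private registry. Unset, REGISTRY and TAG fall back to "opea" and "latest".
export REGISTRY=registry.example.com/opea  # placeholder registry
export TAG=test                            # placeholder tag
docker compose -f .github/workflows/docker/compose/embeddings-compose.yaml \
  --project-directory . build embedding-multimodal-bridgetower embedding-multimodal
```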
4 changes: 0 additions & 4 deletions .github/workflows/docker/compose/finetuning-compose-cd.yaml
@@ -3,10 +3,6 @@

 # this file should be run in the root of the repo
 services:
-  finetuning:
-    build:
-      dockerfile: comps/finetuning/Dockerfile
-    image: ${REGISTRY:-opea}/finetuning:${TAG:-latest}
   finetuning-gaudi:
     build:
       dockerfile: comps/finetuning/Dockerfile.intel_hpu
9 changes: 9 additions & 0 deletions .github/workflows/docker/compose/finetuning-compose.yaml
@@ -0,0 +1,9 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# this file should be run in the root of the repo
+services:
+  finetuning:
+    build:
+      dockerfile: comps/finetuning/Dockerfile
+    image: ${REGISTRY:-opea}/finetuning:${TAG:-latest}
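Since this new file gives only a `dockerfile:` with no build `context`, the `comps/finetuning/Dockerfile` path presumably only resolves when the project directory is the repo root, which is the point of the header comment. One way that might look, sketched here rather than taken from the CI workflow itself:

```bash
# Sketch: pin the project directory (and thus the default build context)
# to the repo root so comps/finetuning/Dockerfile resolves, regardless of
# where the -f compose file lives.
cd GenAIComps  # repo root
docker compose -f .github/workflows/docker/compose/finetuning-compose.yaml \
  --project-directory . build finetuning
```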
10 changes: 0 additions & 10 deletions .github/workflows/docker/compose/guardrails-compose.yaml
@@ -9,13 +9,3 @@ services:
     build:
       dockerfile: comps/guardrails/llama_guard/langchain/Dockerfile
     image: ${REGISTRY:-opea}/guardrails-tgi:${TAG:-latest}
-
-  guardrails-bias-detection:
-    build:
-      dockerfile: comps/guardrails/bias_detection/Dockerfile
-    image: ${REGISTRY:-opea}/guardrails-bias-detection:${TAG:-latest}
-
-  guardrails-toxicity-detection:
-    build:
-      dockerfile: comps/guardrails/toxicity_detection/Dockerfile
-    image: ${REGISTRY:-opea}/guardrails-toxicity-detection:${TAG:-latest}
10 changes: 0 additions & 10 deletions .github/workflows/docker/compose/lvms-compose-cd.yaml
@@ -3,16 +3,6 @@

 # this file should be run in the root of the repo
 services:
-  lvm:
-    build:
-      dockerfile: comps/lvms/llava/Dockerfile
-    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
-  # Xeon CPU
-  llava:
-    build:
-      dockerfile: comps/lvms/llava/dependency/Dockerfile
-    image: ${REGISTRY:-opea}/llava:${TAG:-latest}
-  # Gaudi2 HPU
   llava-hpu:
     build:
       dockerfile: comps/lvms/llava/dependency/Dockerfile.intel_hpu
8 changes: 8 additions & 0 deletions .github/workflows/docker/compose/lvms-compose.yaml
@@ -15,3 +15,11 @@ services:
     build:
       dockerfile: comps/lvms/video-llama/dependency/Dockerfile
     image: ${REGISTRY:-opea}/video-llama-lvm-server:${TAG:-latest}
+  lvm-llava:
+    build:
+      dockerfile: comps/lvms/llava/dependency/Dockerfile
+    image: ${REGISTRY:-opea}/lvm-llava:${TAG:-latest}
+  lvm-llava-svc:
+    build:
+      dockerfile: comps/lvms/llava/Dockerfile
+    image: ${REGISTRY:-opea}/lvm-llava-svc:${TAG:-latest}
4 changes: 0 additions & 4 deletions .github/workflows/docker/compose/retrievers-compose-cd.yaml
@@ -23,10 +23,6 @@ services:
     build:
       dockerfile: comps/retrievers/pathway/langchain/Dockerfile
     image: ${REGISTRY:-opea}/retriever-pathway:${TAG:-latest}
-  multimodal-retriever-redis:
-    build:
-      dockerfile: comps/retrievers/multimodal/redis/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/multimodal-retriever-redis:${TAG:-latest}
   retriever-neo4j:
     build:
       dockerfile: comps/retrievers/neo4j/langchain/Dockerfile
4 changes: 4 additions & 0 deletions .github/workflows/docker/compose/retrievers-compose.yaml
@@ -15,3 +15,7 @@ services:
     build:
       dockerfile: comps/retrievers/vdms/langchain/Dockerfile
     image: ${REGISTRY:-opea}/retriever-vdms:${TAG:-latest}
+  retriever-multimodal-redis:
+    build:
+      dockerfile: comps/retrievers/multimodal/redis/langchain/Dockerfile
+    image: ${REGISTRY:-opea}/retriever-multimodal-redis:${TAG:-latest}
8 changes: 4 additions & 4 deletions comps/agent/langchain/README.md
@@ -57,7 +57,7 @@ python agent.py

 ```bash
 cd GenAIComps/ # back to GenAIComps/ folder
-docker build -t opea/comps-agent-langchain:latest -f comps/agent/langchain/Dockerfile .
+docker build -t opea/agent-langchain:latest -f comps/agent/langchain/Dockerfile .
 ```

 #### 2.2.2 Start microservices
@@ -75,7 +75,7 @@ docker run -d --runtime=habana --name "comps-tgi-gaudi-service" -p 8080:80 -v ./
 docker logs comps-tgi-gaudi-service

 # Agent
-docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest

 # check status
 docker logs comps-langchain-agent-endpoint
@@ -84,7 +84,7 @@ docker logs comps-langchain-agent-endpoint
 > debug mode
 >
 > ```bash
-> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest
 > ```

 ## 🚀 3. Validate Microservice
@@ -159,7 +159,7 @@ def opea_rag_query(query):

 ```bash
 # Agent
-docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursive_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursive_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest
 ```

 - validate with my_tools
14 changes: 7 additions & 7 deletions tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -17,12 +17,12 @@ function build_docker_images() {
     echo "Building the docker images"
     cd $WORKPATH
     echo $WORKPATH
-    docker build --no-cache -t opea/comps-agent-langchain:comps -f comps/agent/langchain/Dockerfile .
+    docker build --no-cache -t opea/agent-langchain:comps -f comps/agent/langchain/Dockerfile .
     if [ $? -ne 0 ]; then
-        echo "opea/comps-agent-langchain built fail"
+        echo "opea/agent-langchain built fail"
         exit 1
     else
-        echo "opea/comps-agent-langchain built successful"
+        echo "opea/agent-langchain built successful"
     fi
 }
@@ -50,7 +50,7 @@ function start_tgi_service() {

 function start_react_langchain_agent_service() {
     echo "Starting react_langchain agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s

     docker logs test-comps-agent-endpoint
@@ -60,15 +60,15 @@ function start_react_langchain_agent_service() {

 function start_react_langgraph_agent_service() {
     echo "Starting react_langgraph agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
 }

 function start_react_langgraph_agent_service_openai() {
     echo "Starting react_langgraph agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
@@ -77,7 +77,7 @@ function start_react_langgraph_agent_service_openai() {

 function start_ragagent_agent_service() {
     echo "Starting rag agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
