From 76b37f1b5841c13b915e3cb0ef223b9d80d1b90c Mon Sep 17 00:00:00 2001
From: chensuyue
Date: Wed, 11 Sep 2024 23:57:49 +0800
Subject: [PATCH] fix test

Signed-off-by: chensuyue
---
 tests/llms/test_llms_text-generation_tgi.sh            | 4 ++--
 tests/retrievers/test_retrievers_pinecone_langchain.sh | 2 +-
 tests/retrievers/test_retrievers_redis_langchain.sh    | 3 ++-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/llms/test_llms_text-generation_tgi.sh b/tests/llms/test_llms_text-generation_tgi.sh
index d8b01e7426..c62a7f77ae 100644
--- a/tests/llms/test_llms_text-generation_tgi.sh
+++ b/tests/llms/test_llms_text-generation_tgi.sh
@@ -24,7 +24,7 @@ function start_service() {
     export your_hf_llm_model=$1
     # Remember to set HF_TOKEN before invoking this test!
     export HF_TOKEN=${HF_TOKEN}
-    docker run -d --name="test-comps-llm-tgi-endpoint" -p $tgi_endpoint_port:80 -v ./data:/data --shm-size 1g -e HF_TOKEN=${HF_TOKEN} ghcr.io/huggingface/text-generation-inference:2.1.0 --model-id ${your_hf_llm_model} --max-input-tokens 1024 --max-total-tokens 2048
+    docker run -d --name="test-comps-llm-tgi-endpoint" -p $tgi_endpoint_port:80 -v ~/.cache/huggingface/hub:/data --shm-size 1g -e HF_TOKEN=${HF_TOKEN} ghcr.io/huggingface/text-generation-inference:2.1.0 --model-id ${your_hf_llm_model} --max-input-tokens 1024 --max-total-tokens 2048

     export TGI_LLM_ENDPOINT="http://${ip_address}:${tgi_endpoint_port}"
     llm_port=5005
@@ -34,7 +34,7 @@
     # check whether tgi is fully ready
     n=0
     until [[ "$n" -ge 100 ]] || [[ $ready == true ]]; do
-        docker logs test-comps-llm-tgi-endpoint >> ${LOG_PATH}/llm-tgi.log
+        docker logs test-comps-llm-tgi-endpoint >> ${LOG_PATH}/${your_hf_llm_model}-llm-tgi.log
         n=$((n+1))
         if grep -q Connected ${LOG_PATH}/llm-tgi.log; then
             break
diff --git a/tests/retrievers/test_retrievers_pinecone_langchain.sh b/tests/retrievers/test_retrievers_pinecone_langchain.sh
index 4077f4f390..643523b7d7 100644
--- a/tests/retrievers/test_retrievers_pinecone_langchain.sh
+++ b/tests/retrievers/test_retrievers_pinecone_langchain.sh
@@ -33,7 +33,7 @@ function start_service() {
     export HUGGINGFACEHUB_API_TOKEN=$HF_TOKEN
     retriever_port=5054
     unset http_proxy
-    docker run -d --name="test-comps-retriever-pinecone-server" -p ${retriever_port}:7000 --ipc=host -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e PINECONE_API_KEY=$PINECONE_API_KEY -e PINECONE_INDEX_NAME=$PINECONE_INDEX_NAME -e INDEX_NAME=$PINECONE_INDEX_NAME opea/retriever-pinecone:comps
+    docker run -d --name="test-comps-retriever-pinecone-server" -p ${retriever_port}:7000 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e PINECONE_API_KEY=$PINECONE_API_KEY -e PINECONE_INDEX_NAME=$PINECONE_INDEX_NAME -e INDEX_NAME=$PINECONE_INDEX_NAME opea/retriever-pinecone:comps

     sleep 2m
 }
diff --git a/tests/retrievers/test_retrievers_redis_langchain.sh b/tests/retrievers/test_retrievers_redis_langchain.sh
index 773be81e64..dd34a2a0f2 100644
--- a/tests/retrievers/test_retrievers_redis_langchain.sh
+++ b/tests/retrievers/test_retrievers_redis_langchain.sh
@@ -34,9 +34,10 @@ function start_service() {
     # redis retriever
     export REDIS_URL="redis://${ip_address}:5010"
     export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=$HF_TOKEN
     retriever_port=5435
     # unset http_proxy
-    docker run -d --name="test-comps-retriever-redis-server" -p ${retriever_port}:7000 --ipc=host -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e REDIS_URL=$REDIS_URL -e INDEX_NAME=$INDEX_NAME opea/retriever-redis:comps
+    docker run -d --name="test-comps-retriever-redis-server" -p ${retriever_port}:7000 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e REDIS_URL=$REDIS_URL -e INDEX_NAME=$INDEX_NAME opea/retriever-redis:comps

     sleep 3m
 }
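
Reviewer note on the tgi hunks above: the docker-logs line now appends to ${LOG_PATH}/${your_hf_llm_model}-llm-tgi.log, but the grep on the following context line still reads ${LOG_PATH}/llm-tgi.log, so the readiness check watches a file the loop no longer writes. Also, ${your_hf_llm_model} may contain a slash (org-scoped model ids), which would turn the new log name into a nested path under ${LOG_PATH}. A minimal sketch of a consistent readiness loop, assuming the same container name and variables as the test; safe_model is illustrative and not part of the patch:

    # Flatten "org/model" into "org-model" so it is safe inside a file name.
    safe_model=$(echo "${your_hf_llm_model}" | tr '/' '-')
    log_file="${LOG_PATH}/${safe_model}-llm-tgi.log"
    n=0
    until [[ "$n" -ge 100 ]]; do
        # Capture both stdout and stderr from the container log.
        docker logs test-comps-llm-tgi-endpoint >> "${log_file}" 2>&1
        # Grep the same file the loop writes to.
        if grep -q Connected "${log_file}"; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done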