Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP]add e2e for codegen #106

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 52 additions & 0 deletions .github/workflows/scripts/e2e/gmc_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ USER_ID=$(whoami)
LOG_PATH=/home/$(whoami)/logs
MOUNT_DIR=/home/$USER_ID/charts-mnt
IMAGE_REPO=${OPEA_IMAGE_REPO:-docker.io}
CODEGEN_NAMESPACE="${APP_NAMESPACE}-codegen"

function install_gmc() {
# Make sure you have to use image tag $VERSION for microservice-connector installation
Expand All @@ -28,12 +29,15 @@ function install_gmc() {
function validate_gmc() {
    # Run the end-to-end validation for every sample pipeline deployed by GMC.
    printf '%s\n' "validate chat-qna"
    validate_chatqna

    printf '%s\n' "validate codegen"
    validate_codegen
}

function cleanup_gmc() {
echo "clean up microservice-connector"
kubectl delete ns $APP_NAMESPACE
kubectl delete ns $CODEGEN_NAMESPACE
kubectl delete ns $SYSTEM_NAMESPACE
kubectl delete crd gmconnectors.gmc.opea.io
# clean up the images
Expand Down Expand Up @@ -94,6 +98,54 @@ function validate_chatqna() {
fi
}

function validate_codegen() {
    # Deploy the codegen sample into its own namespace, then verify the
    # pipeline end to end by sending a code-generation request through the
    # router's accessUrl from an in-cluster client pod.
    # Globals: CODEGEN_NAMESPACE (ns to deploy into), LOG_PATH (log dir)
    # Exits non-zero on request failure or an unreasonable response.

    # todo select gaudi or xeon
    kubectl create ns "$CODEGEN_NAMESPACE"
    sed -i "s|namespace: codegen|namespace: $CODEGEN_NAMESPACE|g" "$(pwd)/config/samples/codegen.yaml"
    kubectl apply -f "$(pwd)/config/samples/codegen.yaml"

    # Wait until the router service is ready
    echo "Waiting for the codegen router service to be ready..."
    wait_until_pod_ready "codegen router" "$CODEGEN_NAMESPACE" "router-service"
    output=$(kubectl get pods -n "$CODEGEN_NAMESPACE")
    # Quote to preserve the table layout instead of word-splitting it.
    echo "$output"

    # deploy client pod for testing
    kubectl create deployment client-test -n "$CODEGEN_NAMESPACE" --image=python:3.8.13 -- sleep infinity

    # wait for client pod ready
    wait_until_pod_ready "client-test" "$CODEGEN_NAMESPACE" "client-test"
    # giving time to populating data
    sleep 60

    kubectl get pods -n "$CODEGEN_NAMESPACE"
    # send request to codegen
    export CLIENT_POD=$(kubectl get pod -n "$CODEGEN_NAMESPACE" -l app=client-test -o jsonpath={.items..metadata.name})
    echo "$CLIENT_POD"
    # Fix: select the codegen GMC by name — the original filtered on
    # 'chatqa' (copy/paste from the chatqna test), which yields an empty URL here.
    accessUrl=$(kubectl get gmc -n "$CODEGEN_NAMESPACE" -o jsonpath="{.items[?(@.metadata.name=='codegen')].status.accessUrl}")
    kubectl exec "$CLIENT_POD" -n "$CODEGEN_NAMESPACE" -- curl "$accessUrl" -X POST -d '{"messages": "def print_hello_world():"}' -H 'Content-Type: application/json' > "$LOG_PATH/gmc_codegen.log"
    exit_code=$?
    if [ $exit_code -ne 0 ]; then
        # Fix: report the failing pipeline as codegen, not chatqna.
        echo "codegen failed, please check the logs in ${LOG_PATH}!"
        exit 1
    fi

    echo "Checking response results, make sure the output is reasonable. "
    local status=false
    # A sane completion of "def print_hello_world():" should echo back "print".
    if [[ -f "$LOG_PATH/gmc_codegen.log" ]] && \
       grep -q "print" "$LOG_PATH/gmc_codegen.log"; then
        status=true
    fi
    if [[ "$status" == false ]]; then
        echo "Response check failed, please check the logs in artifacts!"
        exit 1
    else
        echo "Response check succeed!"
    fi
}

function init_gmc() {
# Copy manifest into gmc
mkdir -p $(pwd)/config/manifests
Expand Down
16 changes: 1 addition & 15 deletions microservices-connector/config/samples/codegen.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,6 @@ spec:
routerConfig:
name: router
serviceName: router-service
config:
no_proxy: ".codegen.svc.cluster.local"
http_proxy: "insert-your-http-proxy-here"
https_proxy: "insert-your-https-proxy-here"
nodes:
root:
routerType: Sequence
Expand All @@ -26,21 +22,11 @@ spec:
internalService:
serviceName: llm-service
config:
no_proxy: ".codegen.svc.cluster.local"
http_proxy: "insert-your-http-proxy-here"
https_proxy: "insert-your-https-proxy-here"
tgi_endpoint: http://tgi-service.codegen.svc.cluster.local:9009
gmcTokenSecret: gmc-tokens
endpoint: /v1/chat/completions
- name: Tgi
internalService:
serviceName: tgi-service
config:
no_proxy: ".codegen.svc.cluster.local"
http_proxy: "insert-your-http-proxy-here"
https_proxy: "insert-your-https-proxy-here"
gmcTokenSecret: gmc-tokens
hostPath: /root/GMC/data/tgi
modelId: ise-uiuc/Magicoder-S-DS-6.7B
LLM_MODEL_ID: ise-uiuc/Magicoder-S-DS-6.7B
endpoint: /generate
isDownstreamService: true
Loading