diff --git a/microservices-connector/helm/Chart.yaml b/microservices-connector/helm/Chart.yaml index 0c66efd3..7572a5e6 100644 --- a/microservices-connector/helm/Chart.yaml +++ b/microservices-connector/helm/Chart.yaml @@ -24,4 +24,6 @@ version: 1.0.0 # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. + appVersion: "v1.0" + diff --git a/microservices-connector/usage_guide.md b/microservices-connector/usage_guide.md index fc9aa41e..9ee8ecec 100644 --- a/microservices-connector/usage_guide.md +++ b/microservices-connector/usage_guide.md @@ -7,17 +7,17 @@ Below are sample use cases: ## Use GMC to compose a chatQnA Pipeline -A sample for chatQnA can be found at config/samples/chatQnA_dataprep_xeon.yaml +A sample for chatQnA can be found at config/samples/ChatQnA/chatQnA_dataprep_xeon.yaml **Deploy chatQnA GMC custom resource** ```sh kubectl create ns chatqa -kubectl apply -f $(pwd)/config/samples/chatQnA_dataprep_xeon.yaml +kubectl apply -f $(pwd)/config/samples/ChatQnA/chatQnA_dataprep_xeon.yaml # To use Gaudi device -#kubectl apply -f $(pwd)/config/samples/chatQnA_dataprep_gaudi.yaml +#kubectl apply -f $(pwd)/config/samples/ChatQnA/chatQnA_dataprep_gaudi.yaml # To use Nvidia GPU -#kubectl apply -f $(pwd)/config/samples/chatQnA_nv.yaml +#kubectl apply -f $(pwd)/config/samples/ChatQnA/chatQnA_nv.yaml ``` **GMC will reconcile chatQnA custom resource and get all related components/services ready** diff --git a/scripts/nvidia/README.md b/scripts/nvidia/README.md index 5966c358..82d498fc 100644 --- a/scripts/nvidia/README.md +++ b/scripts/nvidia/README.md @@ -55,7 +55,7 @@ For more details, refer to [GMC installation](../../microservices-connector/READ Refer to [Usage guide for GMC](../../microservices-connector/usage_guide.md) for more details. 
-Here provides a simple script to use GMC to compose ChatQnA pipeline. +Here is a simple script `./gmc-chatqna-pipeline.sh` that uses GMC to compose the ChatQnA pipeline. #### 3. Test ChatQnA service diff --git a/scripts/nvidia/gmc-chatqna-pipeline.sh b/scripts/nvidia/gmc-chatqna-pipeline.sh index cd0079a6..bca3f434 100755 --- a/scripts/nvidia/gmc-chatqna-pipeline.sh +++ b/scripts/nvidia/gmc-chatqna-pipeline.sh @@ -11,7 +11,7 @@ cd $GenAIInfra_DIR/microservices-connector/ # TODO: to support more examples kubectl create ns chatqa