-
Notifications
You must be signed in to change notification settings - Fork 62
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add Nvidia GPU support for ChatQnA (#225)
1. Add Helm-charts support 2. Add manifests support Signed-off-by: PeterYang12 <[email protected]>
- Loading branch information
1 parent
70205e5
commit 868103b
Showing
5 changed files
with
233 additions
and
3 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,52 @@ | ||
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Default values for chatqna.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  # NOTE(review): the repository field must not embed a tag — the chart
  # template appends ":<tag>" (defaulting to the chart appVersion), so the
  # previous value "opea/chatqna:latest" would render an invalid image
  # reference like "opea/chatqna:latest:<appVersion>". Use the tag key below.
  repository: opea/chatqna
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  # tag: "1.0"

port: 8888
service:
  type: ClusterIP
  port: 8888

securityContext:
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  runAsNonRoot: true
  runAsUser: 1000
  capabilities:
    drop:
      - ALL
  seccompProfile:
    type: RuntimeDefault

# To override values in subchart tgi
tgi:
  LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
  # LLM_MODEL_ID: /data/OpenCodeInterpreter-DS-6.7B
  image:
    repository: ghcr.io/huggingface/text-generation-inference
    tag: "2.0"
  resources:
    limits:
      nvidia.com/gpu: 1

global:
  # Empty strings (not bare/null values) to match the tgi subchart defaults.
  http_proxy: ""
  https_proxy: ""
  no_proxy: ""
  HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
  LANGCHAIN_TRACING_V2: false
  LANGCHAIN_API_KEY: "insert-your-langchain-key-here"
  # set modelUseHostPath to host directory if you want to use hostPath volume for model storage
  # comment out modelUseHostPath if you want to download the model from huggingface
  modelUseHostPath: /mnt/opea-models
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,60 @@ | ||
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Default values for tgi.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

# Container port the TGI server listens on (also the Service targetPort).
port: 2080

image:
  repository: ghcr.io/huggingface/text-generation-inference
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "2.0"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

podAnnotations: {}

podSecurityContext: {}
# fsGroup: 2000

securityContext:
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  runAsNonRoot: true
  runAsUser: 1000
  capabilities:
    drop:
      - ALL
  seccompProfile:
    type: RuntimeDefault

service:
  type: ClusterIP

# Request one NVIDIA GPU per replica; requires the NVIDIA device plugin
# to be installed on the cluster.
resources:
  limits:
    nvidia.com/gpu: 1

nodeSelector: {}

tolerations: []

affinity: {}

LLM_MODEL_ID: Intel/neural-chat-7b-v3-3

global:
  http_proxy: ""
  https_proxy: ""
  no_proxy: ""
  HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
  # set modelUseHostPath to host directory if you want to use hostPath volume for model storage
  # comment out modelUseHostPath if you want to download the model from huggingface
  modelUseHostPath: /mnt/opea-models
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,114 @@ | ||
---
# Source: tgi/templates/configmap.yaml
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

apiVersion: v1
kind: ConfigMap
metadata:
  name: tgi-config
  labels:
    helm.sh/chart: tgi-0.8.0
    app.kubernetes.io/name: tgi
    app.kubernetes.io/instance: tgi
    app.kubernetes.io/version: "2.1.0"
    app.kubernetes.io/managed-by: Helm
data:
  MODEL_ID: "Intel/neural-chat-7b-v3-3"
  PORT: "2080"
  # NOTE(review): tokens belong in a Secret, not a ConfigMap — these are
  # placeholders the user must replace before deploying; consider moving
  # them to a Kubernetes Secret.
  HUGGING_FACE_HUB_TOKEN: "insert-your-huggingface-token-here"
  HF_TOKEN: "insert-your-huggingface-token-here"
  MAX_INPUT_TOKENS: "1024"
  MAX_TOTAL_TOKENS: "4096"
  http_proxy: ""
  https_proxy: ""
  no_proxy: ""
  HABANA_LOGS: "/tmp/habana_logs"
  NUMBA_CACHE_DIR: "/tmp"
  TRANSFORMERS_CACHE: "/tmp/transformers_cache"
  HF_HOME: "/tmp/.cache/huggingface"
---
# Source: tgi/templates/service.yaml
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

apiVersion: v1
kind: Service
metadata:
  name: tgi
  labels:
    helm.sh/chart: tgi-0.8.0
    app.kubernetes.io/name: tgi
    app.kubernetes.io/instance: tgi
    app.kubernetes.io/version: "2.1.0"
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    # Service port 80 forwards to the container port configured via PORT=2080.
    - port: 80
      targetPort: 2080
      protocol: TCP
      name: tgi
  selector:
    app.kubernetes.io/name: tgi
    app.kubernetes.io/instance: tgi
---
# Source: tgi/templates/deployment.yaml
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

apiVersion: apps/v1
kind: Deployment
metadata:
  name: tgi
  labels:
    helm.sh/chart: tgi-0.8.0
    app.kubernetes.io/name: tgi
    app.kubernetes.io/instance: tgi
    app.kubernetes.io/version: "2.1.0"
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tgi
      app.kubernetes.io/instance: tgi
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tgi
        app.kubernetes.io/instance: tgi
    spec:
      securityContext:
        {}
      containers:
        - name: tgi
          envFrom:
            - configMapRef:
                name: tgi-config
            # Optional overlay for extra environment variables; the pod
            # starts even if this ConfigMap does not exist.
            - configMapRef:
                name: extra-env-config
                optional: true
          securityContext:
            {}
          image: "ghcr.io/huggingface/text-generation-inference:2.0"
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /data
              name: model-volume
            - mountPath: /tmp
              name: tmp
          ports:
            - name: http
              containerPort: 2080
              protocol: TCP
          # Requires the NVIDIA device plugin; the pod stays unschedulable
          # on nodes without an allocatable nvidia.com/gpu resource.
          resources:
            limits:
              nvidia.com/gpu: 1
      volumes:
        - name: model-volume
          hostPath:
            # NOTE(review): type Directory means the pod fails to start if
            # /mnt/opea-models does not already exist on the node — create
            # it beforehand or use DirectoryOrCreate.
            path: /mnt/opea-models
            type: Directory
        - name: tmp
          emptyDir: {}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters