From 03e8e67baa7d78e776e870d18b111367621850a6 Mon Sep 17 00:00:00 2001
From: Tyler Titsworth
Date: Wed, 18 Sep 2024 13:41:47 -0700
Subject: [PATCH] TensorFlow Serving on XPU Chart (#359)

Signed-off-by: tylertitsworth
---
 .../charts/tensorflow-serving/.helmignore | 23 +++++
 .../charts/tensorflow-serving/Chart.yaml | 42 ++++++++++
 workflows/charts/tensorflow-serving/README.md | 31 +++++++
 .../tensorflow-serving/templates/NOTES.txt | 19 +++++
 .../tensorflow-serving/templates/_helpers.tpl | 51 +++++++++++
 .../templates/deployment.yaml | 84 +++++++++++++++++++
 .../tensorflow-serving/templates/pvc.yaml | 29 +++++++
 .../tensorflow-serving/templates/service.yaml | 31 +++++++
 .../templates/tests/test-connection.yaml | 29 +++++++
 .../charts/tensorflow-serving/values.yaml | 53 ++++++++++++
 .../tgi/templates/tests/test-connection.yaml | 2 +-
 11 files changed, 393 insertions(+), 1 deletion(-)
 create mode 100644 workflows/charts/tensorflow-serving/.helmignore
 create mode 100644 workflows/charts/tensorflow-serving/Chart.yaml
 create mode 100644 workflows/charts/tensorflow-serving/README.md
 create mode 100644 workflows/charts/tensorflow-serving/templates/NOTES.txt
 create mode 100644 workflows/charts/tensorflow-serving/templates/_helpers.tpl
 create mode 100644 workflows/charts/tensorflow-serving/templates/deployment.yaml
 create mode 100644 workflows/charts/tensorflow-serving/templates/pvc.yaml
 create mode 100644 workflows/charts/tensorflow-serving/templates/service.yaml
 create mode 100644 workflows/charts/tensorflow-serving/templates/tests/test-connection.yaml
 create mode 100644 workflows/charts/tensorflow-serving/values.yaml

diff --git a/workflows/charts/tensorflow-serving/.helmignore b/workflows/charts/tensorflow-serving/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/workflows/charts/tensorflow-serving/Chart.yaml b/workflows/charts/tensorflow-serving/Chart.yaml
new file mode 100644
index 00000000..e6a61952
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/Chart.yaml
@@ -0,0 +1,42 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+name: tensorflow-serving-on-intel
+description: TensorFlow Serving is a flexible, high-performance serving system for machine learning models, designed for production environments. TensorFlow Serving makes it easy to deploy new algorithms and experiments, while keeping the same server architecture and APIs. TensorFlow Serving provides out-of-the-box integration with TensorFlow models, but can be easily extended to serve other types of models and data.
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+maintainers:
+  - name: tylertitsworth
+    email: tyler.titsworth@intel.com
+    url: https://github.com/tylertitsworth
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/workflows/charts/tensorflow-serving/README.md b/workflows/charts/tensorflow-serving/README.md
new file mode 100644
index 00000000..bfbb2900
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/README.md
@@ -0,0 +1,31 @@
+# tensorflow-serving-on-intel
+
+![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square)
+
+TensorFlow Serving is a flexible, high-performance serving system for machine learning models, designed for production environments. TensorFlow Serving makes it easy to deploy new algorithms and experiments, while keeping the same server architecture and APIs. TensorFlow Serving provides out-of-the-box integration with TensorFlow models, but can be easily extended to serve other types of models and data.
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| tylertitsworth | | |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| deploy.env | object | `{"configMapName":"intel-proxy-config","enabled":true}` | Add environment variables from a ConfigMap |
+| deploy.image | string | `"intel/intel-extension-for-tensorflow:serving-gpu"` | Intel Extension for TensorFlow Serving image |
+| deploy.modelName | string | `""` | Name of the model to serve |
+| deploy.replicas | int | `1` | Number of pods |
+| deploy.resources.limits | object | `{"cpu":"4000m","gpu.intel.com/i915":1,"memory":"1Gi"}` | Maximum resources per pod |
+| deploy.resources.limits."gpu.intel.com/i915" | int | `1` | Intel GPU Device Configuration |
+| deploy.resources.requests | object | `{"cpu":"1000m","memory":"512Mi"}` | Minimum resources per pod |
+| deploy.storage.nfs | object | `{"enabled":false,"path":"nil","readOnly":true,"server":"nil"}` | Network File System (NFS) storage for models |
+| fullnameOverride | string | `""` | Fully qualified domain name |
+| nameOverride | string | `""` | Name of the serving service |
+| pvc.size | string | `"5Gi"` | Size of the storage |
+| service.type | string | `"NodePort"` | Type of service |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
diff --git a/workflows/charts/tensorflow-serving/templates/NOTES.txt b/workflows/charts/tensorflow-serving/templates/NOTES.txt
new file mode 100644
index 00000000..fb69969c
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/templates/NOTES.txt
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "tensorflow-serving.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "tensorflow-serving.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "tensorflow-serving.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:8501
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "tensorflow-serving.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+2. Make a prediction
+  curl http://$NODE_IP:$NODE_PORT/v1/models/{{ .Values.deploy.modelName }}
+  curl -X POST http://$NODE_IP:$NODE_PORT/v1/models/{{ .Values.deploy.modelName }}:predict -d '{"instances": []}'
diff --git a/workflows/charts/tensorflow-serving/templates/_helpers.tpl b/workflows/charts/tensorflow-serving/templates/_helpers.tpl
new file mode 100644
index 00000000..2afbfd70
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/templates/_helpers.tpl
@@ -0,0 +1,51 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "tensorflow-serving.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "tensorflow-serving.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "tensorflow-serving.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "tensorflow-serving.labels" -}}
+helm.sh/chart: {{ include "tensorflow-serving.chart" . }}
+{{ include "tensorflow-serving.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "tensorflow-serving.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "tensorflow-serving.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/workflows/charts/tensorflow-serving/templates/deployment.yaml b/workflows/charts/tensorflow-serving/templates/deployment.yaml
new file mode 100644
index 00000000..e6a1fcf6
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/templates/deployment.yaml
@@ -0,0 +1,84 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+{{- $name := .Values.deploy.modelName | required ".Values.deploy.modelName is required." -}}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "tensorflow-serving.fullname" . }}
+  labels:
+    {{- include "tensorflow-serving.labels" . | nindent 4 }}
+spec:
+  replicas: {{ .Values.deploy.replicas }}
+  selector:
+    matchLabels:
+      {{- include "tensorflow-serving.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels:
+        {{- include "tensorflow-serving.labels" . | nindent 8 }}
+    spec:
+      securityContext:
+        fsGroup: 1000
+        runAsUser: 1000
+      containers:
+        - name: tensorflow-serving
+          image: {{ .Values.deploy.image }}
+          {{- if eq .Values.deploy.env.enabled true }}
+          envFrom:
+            - configMapRef:
+                name: {{ .Values.deploy.env.configMapName }}
+          {{- end }}
+          env:
+            - name: MODEL_NAME
+              value: {{ .Values.deploy.modelName }}
+          ports:
+            - name: rest
+              containerPort: 8501
+              protocol: TCP
+            - name: grpc
+              containerPort: 8500
+              protocol: TCP
+          readinessProbe:
+            tcpSocket:
+              port: rest
+            initialDelaySeconds: 15
+            timeoutSeconds: 1
+          volumeMounts:
+            - mountPath: /dev/shm
+              name: dshm
+            {{- if .Values.deploy.storage.nfs.enabled }}
+            - name: model
+              mountPath: /models/{{ .Values.deploy.modelName }}
+            {{- else }}
+            - name: model
+              mountPath: /models/{{ .Values.deploy.modelName }}
+            {{- end }}
+          resources:
+            {{- toYaml .Values.deploy.resources | nindent 12 }}
+      volumes:
+        - name: dshm
+          emptyDir:
+            medium: Memory
+        {{- if .Values.deploy.storage.nfs.enabled }}
+        - name: model
+          nfs:
+            server: {{ .Values.deploy.storage.nfs.server }}
+            path: {{ .Values.deploy.storage.nfs.path }}
+            readOnly: {{ .Values.deploy.storage.nfs.readOnly }}
+        {{- else }}
+        - name: model
+          persistentVolumeClaim:
+            claimName: {{ include "tensorflow-serving.fullname" . }}-model-dir
+        {{- end }}
diff --git a/workflows/charts/tensorflow-serving/templates/pvc.yaml b/workflows/charts/tensorflow-serving/templates/pvc.yaml
new file mode 100644
index 00000000..2cf9040d
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/templates/pvc.yaml
@@ -0,0 +1,29 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+{{- if not .Values.deploy.storage.nfs.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ include "tensorflow-serving.fullname" . }}-model-dir
+  labels:
+    {{- include "tensorflow-serving.labels" . | nindent 4 }}
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: {{ .Values.pvc.size }}
+{{- end }}
diff --git a/workflows/charts/tensorflow-serving/templates/service.yaml b/workflows/charts/tensorflow-serving/templates/service.yaml
new file mode 100644
index 00000000..2eab7890
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/templates/service.yaml
@@ -0,0 +1,31 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "tensorflow-serving.fullname" . }}
+  labels:
+    {{- include "tensorflow-serving.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - name: rest
+      port: 8501
+      targetPort: rest
+    - name: grpc
+      port: 8500
+      targetPort: grpc
+  selector:
+    {{- include "tensorflow-serving.selectorLabels" . | nindent 4 }}
diff --git a/workflows/charts/tensorflow-serving/templates/tests/test-connection.yaml b/workflows/charts/tensorflow-serving/templates/tests/test-connection.yaml
new file mode 100644
index 00000000..0fe61c9a
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/templates/tests/test-connection.yaml
@@ -0,0 +1,29 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "tensorflow-serving.fullname" . }}-test-connection"
+  labels:
+    {{- include "tensorflow-serving.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: info
+      image: curlimages/curl
+      command: ['sh', '-c']
+      args: ['curl -f {{ include "tensorflow-serving.fullname" . }}:8501/v1/models/{{ .Values.deploy.modelName }}']
+  restartPolicy: OnFailure
diff --git a/workflows/charts/tensorflow-serving/values.yaml b/workflows/charts/tensorflow-serving/values.yaml
new file mode 100644
index 00000000..39ed23af
--- /dev/null
+++ b/workflows/charts/tensorflow-serving/values.yaml
@@ -0,0 +1,53 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -- Name of the serving service
+nameOverride: ""
+# -- Fully qualified domain name
+fullnameOverride: ""
+deploy:
+  # -- Intel Extension for TensorFlow Serving image
+  image: intel/intel-extension-for-tensorflow:serving-gpu
+  # -- Add environment variables from a ConfigMap
+  env:
+    configMapName: intel-proxy-config
+    enabled: true
+  # -- Name of the model to serve
+  modelName: ""
+  # -- Number of pods
+  replicas: 1
+  resources:
+    # -- Maximum resources per pod
+    limits:
+      cpu: 4000m
+      memory: 1Gi
+      # -- Intel GPU Device Configuration
+      gpu.intel.com/i915: 1
+    # -- Minimum resources per pod
+    requests:
+      cpu: 1000m
+      memory: 512Mi
+  storage:
+    # -- Network File System (NFS) storage for models
+    nfs:
+      enabled: false
+      server: nil
+      path: nil
+      readOnly: true
+service:
+  # -- Type of service
+  type: NodePort
+pvc:
+  # -- Size of the storage
+  size: 5Gi
diff --git a/workflows/charts/tgi/templates/tests/test-connection.yaml b/workflows/charts/tgi/templates/tests/test-connection.yaml
index 113d8acf..007086c4 100644
--- a/workflows/charts/tgi/templates/tests/test-connection.yaml
+++ b/workflows/charts/tgi/templates/tests/test-connection.yaml
@@ -25,5 +25,5 @@ spec:
     - name: info
       image: curlimages/curl
       command: ['sh', '-c']
-      args: ['curl --noproxy "*" -f {{ include "tgi.fullname" . }}:{{ .Values.service.port }}/info']
+      args: ['curl -f {{ include "tgi.fullname" . }}:{{ .Values.service.port }}/info']
   restartPolicy: OnFailure
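
Usage sketch (not part of the patch above): a minimal way to exercise the chart once merged. The release name `tf-serving`, namespace `serving`, model name `resnet`, and the NFS server/path below are illustrative assumptions, not values from the chart; only `deploy.modelName` is required by the templates, and the NFS keys mirror `deploy.storage.nfs.*` from values.yaml.

    # override.yaml -- hypothetical example values
    deploy:
      modelName: resnet                # required; the deployment template refuses to render without it
      storage:
        nfs:
          enabled: true                # false (the default) uses the chart's PVC instead
          server: nfs.example.com      # placeholder NFS server
          path: /export/models/resnet  # exported directory, mounted in the pod at /models/resnet
          readOnly: true

    # Install from the repository root, then run the chart's bundled connection test
    helm install tf-serving workflows/charts/tensorflow-serving -f override.yaml --namespace serving --create-namespace
    helm test tf-serving --namespace serving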