diff --git a/.circleci/config.yml b/.circleci/config.yml index e1bce111f..8de4c83c1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,7 +16,7 @@ jobs: - image: docker.mirror.hashicorp.services/cimg/go:1.16 environment: BATS_VERSION: "1.3.0" - CHART_VERIFIER_VERSION: "1.0.0" + CHART_VERIFIER_VERSION: "1.2.1" steps: - checkout - run: @@ -94,7 +94,7 @@ workflows: - bats-unit-test filters: branches: - only: master + only: main update-helm-charts-index: jobs: - update-helm-charts-index: diff --git a/.gitignore b/.gitignore index 6992d23f4..2e23aca27 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ vaul-helm-dev-creds.json ./test/unit/vaul-helm-dev-creds.json ./test/acceptance/values.yaml ./test/acceptance/values.yml +.idea diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c58777ac..286e60714 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,61 @@ ## Unreleased +## 0.17.1 (October 25th, 2021) + +Improvements: + * Add option for Ingress PathType [GH-634](https://github.com/hashicorp/vault-helm/pull/634) + +## 0.17.0 (October 21st, 2021) + +KNOWN ISSUES: +* The chart will fail to deploy on Kubernetes 1.19+ with `server.ingress.enabled=true` because no `pathType` is set + +CHANGES: +* Vault image default 1.8.4 +* Vault K8s image default 0.14.0 + +Improvements: +* Support Ingress stable networking API [GH-590](https://github.com/hashicorp/vault-helm/pull/590) +* Support setting the `externalTrafficPolicy` for `LoadBalancer` and `NodePort` service types [GH-626](https://github.com/hashicorp/vault-helm/pull/626) +* Support setting ingressClassName on server Ingress [GH-630](https://github.com/hashicorp/vault-helm/pull/630) + +Bugs: +* Ensure `kubeletRootDir` volume path and mounts are the same when `csi.daemonSet.kubeletRootDir` is overridden [GH-628](https://github.com/hashicorp/vault-helm/pull/628) + +## 0.16.1 (September 29th, 2021) + +CHANGES: +* Vault image default 1.8.3 +* Vault K8s image default 0.13.1 + +## 0.16.0 (September 16th, 2021) + +CHANGES: +* Support for deploying a leader-elector container with the [vault-k8s injector](https://github.com/hashicorp/vault-k8s) injector will be removed in version 0.18.0 of this chart since vault-k8s now uses an internal mechanism to determine leadership. To enable the deployment of the leader-elector container for use with vault-k8s 0.12.0 and earlier, set `useContainer=true`. 
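A minimal sketch of an install that keeps the deprecated sidecar, mirroring the flags used in the chart's own acceptance tests (release name and chart repo are illustrative); per the injector deployment template, the container is only rendered when `injector.leaderElector.enabled=true` (the default), `injector.replicas` is greater than 1, and `useContainer=true`:

```shell
# keep the deprecated leader-elector sidecar for vault-k8s 0.12.0 and earlier
helm install vault hashicorp/vault \
  --set injector.replicas=3 \
  --set injector.leaderElector.useContainer=true
```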
+ +Improvements: + * Make CSI provider `hostPaths` configurable via `csi.daemonSet.providersDir` and `csi.daemonSet.kubeletRootDir` [GH-603](https://github.com/hashicorp/vault-helm/pull/603) + * Support vault-k8s internal leader election [GH-568](https://github.com/hashicorp/vault-helm/pull/568) [GH-607](https://github.com/hashicorp/vault-helm/pull/607) + +## 0.15.0 (August 23rd, 2021) + +Improvements: +* Add imagePullSecrets on server test [GH-572](https://github.com/hashicorp/vault-helm/pull/572) +* Add injector.webhookAnnotations chart option [GH-584](https://github.com/hashicorp/vault-helm/pull/584) + +## 0.14.0 (July 28th, 2021) + +Features: +* Added templateConfig.exitOnRetryFailure annotation for the injector [GH-560](https://github.com/hashicorp/vault-helm/pull/560) + +Improvements: +* Support configuring pod tolerations, pod affinity, and node selectors as YAML [GH-565](https://github.com/hashicorp/vault-helm/pull/565) +* Set the default vault image to come from the hashicorp organization [GH-567](https://github.com/hashicorp/vault-helm/pull/567) +* Add support for running the acceptance tests against a local `kind` cluster [GH-567](https://github.com/hashicorp/vault-helm/pull/567) +* Add `server.ingress.activeService` to configure if the ingress should use the active service [GH-570](https://github.com/hashicorp/vault-helm/pull/570) +* Add `server.route.activeService` to configure if the route should use the active service [GH-570](https://github.com/hashicorp/vault-helm/pull/570) +* Support configuring `global.imagePullSecrets` from a string array [GH-576](https://github.com/hashicorp/vault-helm/pull/576) + ## 0.13.0 (June 17th, 2021) Improvements: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f83d56747..f1c160000 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,7 +26,7 @@ quickly merge or address your contributions. * Make sure you test against the latest released version. It is possible we already fixed the bug you're experiencing. Even better is if you can test - against `master`, as bugs are fixed regularly but new versions are only + against `main`, as bugs are fixed regularly but new versions are only released every few months. * Provide steps to reproduce the issue, and if possible include the expected @@ -121,7 +121,7 @@ may not be properly cleaned up. We recommend recycling the Kubernetes cluster to start from a clean slate. **Note:** There is a Terraform configuration in the -[`test/terraform/`](https://github.com/hashicorp/vault-helm/tree/master/test/terraform) directory +[`test/terraform/`](https://github.com/hashicorp/vault-helm/tree/main/test/terraform) directory that can be used to quickly bring up a GKE cluster and configure `kubectl` and `helm` locally. This can be used to quickly spin up a test cluster for acceptance tests. Unit tests _do not_ require a running Kubernetes diff --git a/Chart.yaml b/Chart.yaml index 045116268..248ab9b13 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -1,9 +1,10 @@ apiVersion: v2 name: vault -version: 0.13.0 -appVersion: 1.7.3 +version: 0.17.1 +appVersion: 1.8.4 kubeVersion: ">= 1.14.0-0" -description: Official HashiCorp Vault Chart +description: Install and configure Vault on Kubernetes. 
+ home: https://www.vaultproject.io icon: https://github.com/hashicorp/vault/raw/f22d202cde2018f9455dec755118a9b84586e082/Vault_PrimaryLogo_Black.png keywords: ["vault", "security", "encryption", "secrets", "management", "automation", "infrastructure"] diff --git a/Makefile b/Makefile index 1b3020c59..fb35f977f 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,19 @@ CLOUDSDK_CORE_PROJECT?=vault-helm-dev-246514 # set to run a single test - e.g acceptance/server-ha-enterprise-dr.bats ACCEPTANCE_TESTS?=acceptance + +# filter bats unit tests to run. +UNIT_TESTS_FILTER?='.*' + +# set to 'true' to run acceptance tests locally in a kind cluster +LOCAL_ACCEPTANCE_TESTS?=false + +# kind cluster name +KIND_CLUSTER_NAME?=vault-helm + +# kind k8s version +KIND_K8S_VERSION?=v1.20.2 + # Generate json schema for chart values. See test/README.md for more details. values-schema: helm schema-gen values.yaml > values.schema.json @@ -12,7 +25,7 @@ test-image: @docker build --rm -t $(TEST_IMAGE) -f $(CURDIR)/test/docker/Test.dockerfile $(CURDIR) test-unit: - @docker run -it -v ${PWD}:/helm-test $(TEST_IMAGE) bats /helm-test/test/unit + @docker run --rm -it -v ${PWD}:/helm-test $(TEST_IMAGE) bats -f $(UNIT_TESTS_FILTER) /helm-test/test/unit test-bats: test-unit test-acceptance @@ -21,14 +34,19 @@ test: test-image test-bats # run acceptance tests on GKE # set google project/credential vars above test-acceptance: +ifeq ($(LOCAL_ACCEPTANCE_TESTS),true) + make setup-kind acceptance +else @docker run -it -v ${PWD}:/helm-test \ -e GOOGLE_CREDENTIALS=${GOOGLE_CREDENTIALS} \ -e CLOUDSDK_CORE_PROJECT=${CLOUDSDK_CORE_PROJECT} \ -e KUBECONFIG=/helm-test/.kube/config \ + -e VAULT_LICENSE_CI=${VAULT_LICENSE_CI} \ -w /helm-test \ $(TEST_IMAGE) \ make acceptance - +endif + # destroy GKE cluster using terraform test-destroy: @docker run -it -v ${PWD}:/helm-test \ @@ -51,7 +69,9 @@ test-provision: # this target is for running the acceptance tests # it is run in the docker container above when the test-acceptance target is invoked acceptance: +ifneq ($(LOCAL_ACCEPTANCE_TESTS),true) gcloud auth activate-service-account --key-file=${GOOGLE_CREDENTIALS} +endif bats test/${ACCEPTANCE_TESTS} # this target is for provisioning the GKE cluster @@ -66,4 +86,17 @@ provision-cluster: destroy-cluster: terraform destroy -auto-approve +# create a kind cluster for running the acceptance tests locally +setup-kind: + kind get clusters | grep -q "^${KIND_CLUSTER_NAME}$$" || \ + kind create cluster \ + --image kindest/node:${KIND_K8S_VERSION} \ + --name ${KIND_CLUSTER_NAME} \ + --config $(CURDIR)/test/kind/config.yaml + kubectl config use-context kind-${KIND_CLUSTER_NAME} + +# delete the kind cluster +delete-kind: + kind delete cluster --name ${KIND_CLUSTER_NAME} || : + .PHONY: values-schema test-image test-unit test-bats test test-acceptance test-destroy test-provision acceptance provision-cluster destroy-cluster diff --git a/README.md b/README.md index 8096d7f79..03afc2b3a 100644 --- a/README.md +++ b/README.md @@ -43,10 +43,9 @@ $ helm repo add hashicorp https://helm.releases.hashicorp.com $ helm install vault hashicorp/vault ``` -Please see the many options supported in the `values.yaml` file. These are also -fully documented directly on the [Vault -website](https://www.vaultproject.io/docs/platform/k8s/helm) along with more -detailed installation instructions. +Please see the many options supported in the `values.yaml` +file. 
These are also fully documented directly on the +[Vault website](https://www.vaultproject.io/docs/platform/k8s/helm.html). ## Customizations diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl index 5e3269f78..e5ea9264d 100644 --- a/templates/_helpers.tpl +++ b/templates/_helpers.tpl @@ -232,7 +232,12 @@ Set's the affinity for pod placement when running in standalone and HA modes. {{- define "vault.affinity" -}} {{- if and (ne .mode "dev") .Values.server.affinity }} affinity: - {{ tpl .Values.server.affinity . | nindent 8 | trim }} + {{ $tp := typeOf .Values.server.affinity }} + {{- if eq $tp "string" }} + {{- tpl .Values.server.affinity . | nindent 8 | trim }} + {{- else }} + {{- toYaml .Values.server.affinity | nindent 8 }} + {{- end }} {{ end }} {{- end -}} @@ -242,17 +247,27 @@ Sets the injector affinity for pod placement {{- define "injector.affinity" -}} {{- if .Values.injector.affinity }} affinity: - {{ tpl .Values.injector.affinity . | nindent 8 | trim }} + {{ $tp := typeOf .Values.injector.affinity }} + {{- if eq $tp "string" }} + {{- tpl .Values.injector.affinity . | nindent 8 | trim }} + {{- else }} + {{- toYaml .Values.injector.affinity | nindent 8 }} + {{- end }} {{ end }} {{- end -}} {{/* -Set's the toleration for pod placement when running in standalone and HA modes. +Sets the toleration for pod placement when running in standalone and HA modes. */}} {{- define "vault.tolerations" -}} {{- if and (ne .mode "dev") .Values.server.tolerations }} tolerations: + {{- $tp := typeOf .Values.server.tolerations }} + {{- if eq $tp "string" }} {{ tpl .Values.server.tolerations . | nindent 8 | trim }} + {{- else }} + {{- toYaml .Values.server.tolerations | nindent 8 }} + {{- end }} {{- end }} {{- end -}} @@ -262,7 +277,12 @@ Sets the injector toleration for pod placement {{- define "injector.tolerations" -}} {{- if .Values.injector.tolerations }} tolerations: + {{- $tp := typeOf .Values.injector.tolerations }} + {{- if eq $tp "string" }} {{ tpl .Values.injector.tolerations . | nindent 8 | trim }} + {{- else }} + {{- toYaml .Values.injector.tolerations | nindent 8 }} + {{- end }} {{- end }} {{- end -}} @@ -272,7 +292,12 @@ Set's the node selector for pod placement when running in standalone and HA mode {{- define "vault.nodeselector" -}} {{- if and (ne .mode "dev") .Values.server.nodeSelector }} nodeSelector: - {{ tpl .Values.server.nodeSelector . | indent 8 | trim }} + {{- $tp := typeOf .Values.server.nodeSelector }} + {{- if eq $tp "string" }} + {{ tpl .Values.server.nodeSelector . | nindent 8 | trim }} + {{- else }} + {{- toYaml .Values.server.nodeSelector | nindent 8 }} + {{- end }} {{- end }} {{- end -}} @@ -282,7 +307,12 @@ Sets the injector node selector for pod placement {{- define "injector.nodeselector" -}} {{- if .Values.injector.nodeSelector }} nodeSelector: - {{ tpl .Values.injector.nodeSelector . | indent 8 | trim }} + {{- $tp := typeOf .Values.injector.nodeSelector }} + {{- if eq $tp "string" }} + {{ tpl .Values.injector.nodeSelector . | nindent 8 | trim }} + {{- else }} + {{- toYaml .Values.injector.nodeSelector | nindent 8 }} + {{- end }} {{- end }} {{- end -}} @@ -331,6 +361,21 @@ Sets extra injector service annotations {{- end }} {{- end -}} +{{/* +Sets extra injector webhook annotations +*/}} +{{- define "injector.webhookAnnotations" -}} + {{- if .Values.injector.webhookAnnotations }} + annotations: + {{- $tp := typeOf .Values.injector.webhookAnnotations }} + {{- if eq $tp "string" }} + {{- tpl .Values.injector.webhookAnnotations . 
| nindent 4 }} + {{- else }} + {{- toYaml .Values.injector.webhookAnnotations | nindent 4 }} + {{- end }} + {{- end }} +{{- end -}} + {{/* Sets extra ui service annotations */}} @@ -527,7 +572,12 @@ Sets the injector toleration for pod placement {{- define "csi.pod.tolerations" -}} {{- if .Values.csi.pod.tolerations }} tolerations: + {{- $tp := typeOf .Values.csi.pod.tolerations }} + {{- if eq $tp "string" }} {{ tpl .Values.csi.pod.tolerations . | nindent 8 | trim }} + {{- else }} + {{- toYaml .Values.csi.pod.tolerations | nindent 8 }} + {{- end }} {{- end }} {{- end -}} @@ -596,3 +646,55 @@ Inject extra environment populated by secrets, if populated {{ "https" }} {{- end -}} {{- end -}} + +{{/* +imagePullSecrets generates pull secrets from either string or map values. +A map value must be indexable by the key 'name'. +*/}} +{{- define "imagePullSecrets" -}} +{{- with .Values.global.imagePullSecrets -}} +imagePullSecrets: +{{- range . -}} +{{- if typeIs "string" . }} + - name: {{ . }} +{{- else if index . "name" }} + - name: {{ .name }} +{{- end }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +externalTrafficPolicy sets a Service's externalTrafficPolicy if applicable. +Supported inputs are Values.server.service and Values.ui +*/}} +{{- define "service.externalTrafficPolicy" -}} +{{- $type := "" -}} +{{- if .serviceType -}} +{{- $type = .serviceType -}} +{{- else if .type -}} +{{- $type = .type -}} +{{- end -}} +{{- if and .externalTrafficPolicy (or (eq $type "LoadBalancer") (eq $type "NodePort")) }} + externalTrafficPolicy: {{ .externalTrafficPolicy }} +{{- else }} +{{- end }} +{{- end -}} + +{{/* +loadBalancer configuration for the the UI service. +Supported inputs are Values.ui +*/}} +{{- define "service.loadBalancer" -}} +{{- if eq (.serviceType | toString) "LoadBalancer" }} +{{- if .loadBalancerIP }} + loadBalancerIP: {{ .loadBalancerIP }} +{{- end }} +{{- with .loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{- range . }} + - {{ . }} +{{- end }} +{{- end -}} +{{- end }} +{{- end -}} diff --git a/templates/csi-daemonset.yaml b/templates/csi-daemonset.yaml index 75bde9a32..a6461fbd2 100644 --- a/templates/csi-daemonset.yaml +++ b/templates/csi-daemonset.yaml @@ -44,7 +44,7 @@ spec: - name: providervol mountPath: "/provider" - name: mountpoint-dir - mountPath: /var/lib/kubelet/pods + mountPath: {{ .Values.csi.daemonSet.kubeletRootDir }}/pods mountPropagation: HostToContainer {{- if .Values.csi.volumeMounts }} {{- toYaml .Values.csi.volumeMounts | nindent 12}} @@ -70,15 +70,12 @@ spec: volumes: - name: providervol hostPath: - path: "/etc/kubernetes/secrets-store-csi-providers" + path: {{ .Values.csi.daemonSet.providersDir }} - name: mountpoint-dir hostPath: - path: /var/lib/kubelet/pods + path: {{ .Values.csi.daemonSet.kubeletRootDir }}/pods {{- if .Values.csi.volumes }} {{- toYaml .Values.csi.volumes | nindent 8}} {{- end }} - {{- if .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- toYaml .Values.global.imagePullSecrets | nindent 8 }} - {{- end }} + {{- include "imagePullSecrets" . | nindent 6 }} {{- end }} diff --git a/templates/injector-certs-secret.yaml b/templates/injector-certs-secret.yaml index aec802140..78363be55 100644 --- a/templates/injector-certs-secret.yaml +++ b/templates/injector-certs-secret.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/name: {{ include "vault.name" . 
}}-agent-injector app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/templates/injector-deployment.yaml b/templates/injector-deployment.yaml index 4756a253d..0d3c89158 100644 --- a/templates/injector-deployment.yaml +++ b/templates/injector-deployment.yaml @@ -107,7 +107,13 @@ spec: value: "{{ .Values.injector.agentDefaults.memLimit }}" - name: AGENT_INJECT_DEFAULT_TEMPLATE value: "{{ .Values.injector.agentDefaults.template }}" + - name: AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE + value: "{{ .Values.injector.agentDefaults.templateConfig.exitOnRetryFailure }}" {{- include "vault.extraEnvironmentVars" .Values.injector | nindent 12 }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: - agent-inject - 2>&1 @@ -131,13 +137,7 @@ spec: periodSeconds: 2 successThreshold: 1 timeoutSeconds: 5 -{{- if .Values.injector.certs.secretName }} - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook/certs - readOnly: true -{{- end }} - {{- if and (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) }} + {{- if and (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) (eq (.Values.injector.leaderElector.useContainer | toString) "true") }} - name: leader-elector image: {{ .Values.injector.leaderElector.image.repository }}:{{ .Values.injector.leaderElector.image.tag }} args: @@ -166,14 +166,17 @@ spec: successThreshold: 1 timeoutSeconds: 5 {{- end }} +{{- if .Values.injector.certs.secretName }} + volumeMounts: + - name: webhook-certs + mountPath: /etc/webhook/certs + readOnly: true +{{- end }} {{- if .Values.injector.certs.secretName }} volumes: - name: webhook-certs secret: secretName: "{{ .Values.injector.certs.secretName }}" {{- end }} - {{- if .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- toYaml .Values.global.imagePullSecrets | nindent 8 }} - {{- end }} -{{ end }} + {{- include "imagePullSecrets" . | nindent 6 }} +{{ end }} \ No newline at end of file diff --git a/templates/injector-leader-endpoint.yaml b/templates/injector-leader-endpoint.yaml index fc4ef1608..42c4c0ae7 100644 --- a/templates/injector-leader-endpoint.yaml +++ b/templates/injector-leader-endpoint.yaml @@ -1,12 +1,14 @@ -{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) }} +{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) (eq (.Values.injector.leaderElector.useContainer | toString) "true")}} # This is created here so it can be cleaned up easily, since if # the endpoint is left around the leader won't expire for about a minute. apiVersion: v1 kind: Endpoints metadata: name: {{ template "vault.fullname" . }}-agent-injector-leader + annotations: + deprecated: "true" labels: app.kubernetes.io/name: {{ include "vault.name" . 
}}-agent-injector app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/templates/injector-mutating-webhook.yaml b/templates/injector-mutating-webhook.yaml index abe23aabc..de7dd5622 100644 --- a/templates/injector-mutating-webhook.yaml +++ b/templates/injector-mutating-webhook.yaml @@ -11,6 +11,7 @@ metadata: app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- template "injector.webhookAnnotations" . }} webhooks: - name: vault.hashicorp.com sideEffects: None diff --git a/templates/injector-role.yaml b/templates/injector-role.yaml index e74524cea..446efaf59 100644 --- a/templates/injector-role.yaml +++ b/templates/injector-role.yaml @@ -9,11 +9,17 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} rules: - apiGroups: [""] - resources: ["endpoints", "secrets"] + resources: ["secrets", "configmaps", "endpoints"] verbs: - "create" - "get" - "watch" - "list" - "update" -{{- end }} \ No newline at end of file + - apiGroups: [""] + resources: ["pods"] + verbs: + - "get" + - "patch" + - "delete" +{{- end }} diff --git a/templates/injector-rolebinding.yaml b/templates/injector-rolebinding.yaml index e06d2425f..aa8179420 100644 --- a/templates/injector-rolebinding.yaml +++ b/templates/injector-rolebinding.yaml @@ -15,4 +15,4 @@ subjects: - kind: ServiceAccount name: {{ template "vault.fullname" . }}-agent-injector namespace: {{ .Release.Namespace }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/templates/server-ha-active-service.yaml b/templates/server-ha-active-service.yaml index 74fca41d7..c2a4f0227 100644 --- a/templates/server-ha-active-service.yaml +++ b/templates/server-ha-active-service.yaml @@ -21,6 +21,7 @@ spec: {{- if .Values.server.service.clusterIP }} clusterIP: {{ .Values.server.service.clusterIP }} {{- end }} + {{- include "service.externalTrafficPolicy" .Values.server.service }} publishNotReadyAddresses: true ports: - name: {{ include "vault.scheme" . }} diff --git a/templates/server-ha-standby-service.yaml b/templates/server-ha-standby-service.yaml index 9213b7452..dbba9d653 100644 --- a/templates/server-ha-standby-service.yaml +++ b/templates/server-ha-standby-service.yaml @@ -21,6 +21,7 @@ spec: {{- if .Values.server.service.clusterIP }} clusterIP: {{ .Values.server.service.clusterIP }} {{- end }} + {{- include "service.externalTrafficPolicy" .Values.server.service }} publishNotReadyAddresses: true ports: - name: {{ include "vault.scheme" . }} diff --git a/templates/server-ingress.yaml b/templates/server-ingress.yaml index deaa0dd55..48c76a828 100644 --- a/templates/server-ingress.yaml +++ b/templates/server-ingress.yaml @@ -4,11 +4,15 @@ {{- if .Values.server.ingress.enabled -}} {{- $extraPaths := .Values.server.ingress.extraPaths -}} {{- $serviceName := include "vault.fullname" . 
-}} -{{- if and (eq .mode "ha" ) (eq (.Values.server.service.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} +{{- if and (eq .mode "ha" ) (eq (.Values.server.service.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") (eq (.Values.server.ingress.activeService | toString) "true") }} {{- $serviceName = printf "%s-%s" $serviceName "active" -}} {{- end }} {{- $servicePort := .Values.server.service.port -}} -{{ if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +{{- $pathType := .Values.server.ingress.pathType -}} +{{- $kubeVersion := .Capabilities.KubeVersion.Version }} +{{ if semverCompare ">= 1.19.0-0" $kubeVersion }} +apiVersion: networking.k8s.io/v1 +{{ else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} apiVersion: networking.k8s.io/v1beta1 {{ else }} apiVersion: extensions/v1beta1 @@ -36,6 +40,9 @@ spec: {{- end }} secretName: {{ .secretName }} {{- end }} +{{- end }} +{{- if .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} {{- end }} rules: {{- range .Values.server.ingress.hosts }} @@ -47,9 +54,19 @@ spec: {{- end }} {{- range (.paths | default (list "/")) }} - path: {{ . }} + {{ if semverCompare ">= 1.19.0-0" $kubeVersion }} + pathType: {{ $pathType }} + {{ end }} backend: + {{ if semverCompare ">= 1.19.0-0" $kubeVersion }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{ else }} serviceName: {{ $serviceName }} servicePort: {{ $servicePort }} + {{ end }} {{- end }} {{- end }} {{- end }} diff --git a/templates/server-route.yaml b/templates/server-route.yaml index 2fccf0263..63055db39 100644 --- a/templates/server-route.yaml +++ b/templates/server-route.yaml @@ -1,33 +1,33 @@ -{{- if .Values.global.openshift }} -{{- if ne .mode "external" }} -{{- if .Values.server.route.enabled -}} -{{- $serviceName := include "vault.fullname" . -}} -{{- if eq .mode "ha" }} -{{- $serviceName = printf "%s-%s" $serviceName "active" -}} -{{- end }} -kind: Route -apiVersion: route.openshift.io/v1 -metadata: - name: {{ template "vault.fullname" . }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- with .Values.server.route.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- template "vault.route.annotations" . }} -spec: - host: {{ .Values.server.route.host }} - to: - kind: Service - name: {{ $serviceName }} - weight: 100 - port: - targetPort: 8200 - tls: - termination: passthrough -{{- end }} -{{- end }} -{{- end }} +{{- if .Values.global.openshift }} +{{- if ne .mode "external" }} +{{- if .Values.server.route.enabled -}} +{{- $serviceName := include "vault.fullname" . -}} +{{- if and (eq .mode "ha" ) (eq (.Values.server.route.activeService | toString) "true") }} +{{- $serviceName = printf "%s-%s" $serviceName "active" -}} +{{- end }} +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: {{ template "vault.fullname" . }} + labels: + helm.sh/chart: {{ include "vault.chart" . }} + app.kubernetes.io/name: {{ include "vault.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.server.route.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- template "vault.route.annotations" . 
}} +spec: + host: {{ .Values.server.route.host }} + to: + kind: Service + name: {{ $serviceName }} + weight: 100 + port: + targetPort: 8200 + tls: + termination: passthrough +{{- end }} +{{- end }} +{{- end }} diff --git a/templates/server-service.yaml b/templates/server-service.yaml index 85a05db34..2e44ddf06 100644 --- a/templates/server-service.yaml +++ b/templates/server-service.yaml @@ -21,6 +21,7 @@ spec: {{- if .Values.server.service.clusterIP }} clusterIP: {{ .Values.server.service.clusterIP }} {{- end }} + {{- include "service.externalTrafficPolicy" .Values.server.service }} {{- if and (.Values.maas.lbAddress) (eq (.Values.server.service.type | toString) "LoadBalancer") }} loadBalancerIP: {{ .Values.maas.lbAddress }} {{- end }} diff --git a/templates/server-statefulset.yaml b/templates/server-statefulset.yaml index 718c9a03e..031b17905 100644 --- a/templates/server-statefulset.yaml +++ b/templates/server-statefulset.yaml @@ -202,10 +202,7 @@ spec: {{- if .Values.server.extraContainers }} {{ toYaml .Values.server.extraContainers | nindent 8}} {{- end }} - {{- if .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- toYaml .Values.global.imagePullSecrets | nindent 8 }} - {{- end }} + {{- include "imagePullSecrets" . | nindent 6 }} {{ template "vault.volumeclaims" . }} {{ end }} {{ end }} diff --git a/templates/tests/server-test.yaml b/templates/tests/server-test.yaml index 37819de5c..66aa178f5 100644 --- a/templates/tests/server-test.yaml +++ b/templates/tests/server-test.yaml @@ -7,6 +7,7 @@ metadata: annotations: "helm.sh/hook": test spec: + {{- include "imagePullSecrets" . | nindent 2 }} containers: - name: {{ .Release.Name }}-server-test image: {{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default "latest" }} diff --git a/templates/ui-service.yaml b/templates/ui-service.yaml index 9e90af4bb..ea27de282 100644 --- a/templates/ui-service.yaml +++ b/templates/ui-service.yaml @@ -30,16 +30,8 @@ spec: nodePort: {{ .Values.ui.serviceNodePort }} {{- end }} type: {{ .Values.ui.serviceType }} - {{- if and (eq (.Values.ui.serviceType | toString) "LoadBalancer") (.Values.ui.loadBalancerSourceRanges) }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.ui.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} - {{- end }} - {{- if and (eq (.Values.ui.serviceType | toString) "LoadBalancer") (.Values.ui.loadBalancerIP) }} - loadBalancerIP: {{ .Values.ui.loadBalancerIP }} - {{- end }} + {{- include "service.externalTrafficPolicy" .Values.ui }} + {{- include "service.loadBalancer" .Values.ui }} {{- end -}} - {{- end }} {{- end }} diff --git a/test/README.md b/test/README.md index 28431dbf9..951a0616e 100644 --- a/test/README.md +++ b/test/README.md @@ -2,15 +2,29 @@ ## Running Vault Helm Acceptance tests -The Makefile at the top level of this repo contains a few target that should help with running acceptance tests in your own GKE instance. +The Makefile at the top level of this repo contains a few target that should help with running acceptance tests in your own GKE instance or in a kind cluster. -* Set the GOOGLE_CREDENTIALS and CLOUDSDK_CORE_PROJECT variables at the top of the file. GOOGLE_CREDENTIALS should contain the local path to your Google Cloud Platform account credentials in JSON format. CLOUDSDK_CORE_PROJECT should be set to the ID of your GCP project. +Note that for the Vault Enterprise tests to pass, a `VAULT_LICENSE_CI` environment variable needs to be set to the contents of a valid Vault Enterprise license. 
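As a rough sketch, the variable can simply be exported before invoking the acceptance target (the license file path below is hypothetical); the Makefile passes `VAULT_LICENSE_CI` through to the tests:

```shell
# export the Enterprise license contents, then run the local kind-based flow
export VAULT_LICENSE_CI="$(cat ./vault-enterprise.hclic)"   # hypothetical path
make test-acceptance LOCAL_ACCEPTANCE_TESTS=true
```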
+ +### Running in a GKE cluster + +* Set the `GOOGLE_CREDENTIALS` and `CLOUDSDK_CORE_PROJECT` variables at the top of the file. `GOOGLE_CREDENTIALS` should contain the local path to your Google Cloud Platform account credentials in JSON format. `CLOUDSDK_CORE_PROJECT` should be set to the ID of your GCP project. * Run `make test-image` to create the docker image (with dependencies installed) that will be re-used in the below steps. * Run `make test-provision` to provision the GKE cluster using terraform. * Run `make test-acceptance` to run the acceptance tests in this already provisioned cluster. * You can choose to only run certain tests by setting the ACCEPTANCE_TESTS variable and re-running the above target. * Run `make test-destroy` when you have finished testing and want to tear-down and remove the cluster. +### Running in a kind cluster + +* Run `make test-acceptance LOCAL_ACCEPTANCE_TESTS=true` +* You can choose to only run certain tests by setting the `ACCEPTANCE_TESTS` variable and re-running the above target. +* Run `make delete-kind` when you have finished testing and want to tear-down and remove the cluster. +* You can set an alternate kind cluster name by specifying the `KIND_CLUSTER_NAME` variable for any of the above targets. +* You can set an alternate K8S version by specifying the `KIND_K8S_VERSION` variable for any of the above targets. + +See [kind-quick-start](https://kind.sigs.k8s.io/docs/user/quick-start/) if you don't have kind installed on your system. + ## Running chart verification tests If [chart-verifier](https://github.com/redhat-certification/chart-verifier) is built and available in your PATH, run: diff --git a/test/acceptance/csi.bats b/test/acceptance/csi.bats index 67be09d6d..d222ca274 100644 --- a/test/acceptance/csi.bats +++ b/test/acceptance/csi.bats @@ -9,7 +9,8 @@ load _helpers kubectl create namespace acceptance # Install Secrets Store CSI driver - helm install secrets-store-csi-driver https://github.com/kubernetes-sigs/secrets-store-csi-driver/blob/master/charts/secrets-store-csi-driver-0.0.20.tgz?raw=true \ + CSI_DRIVER_VERSION=0.2.0 + helm install secrets-store-csi-driver https://github.com/kubernetes-sigs/secrets-store-csi-driver/blob/v${CSI_DRIVER_VERSION}/charts/secrets-store-csi-driver-${CSI_DRIVER_VERSION}.tgz?raw=true \ --wait --timeout=5m \ --namespace=acceptance \ --set linux.image.pullPolicy="IfNotPresent" diff --git a/test/acceptance/injector-leader-elector.bats b/test/acceptance/injector-leader-elector.bats index 8cfde5bf7..6f9f0b41f 100644 --- a/test/acceptance/injector-leader-elector.bats +++ b/test/acceptance/injector-leader-elector.bats @@ -4,28 +4,41 @@ load _helpers @test "injector: testing leader elector" { cd `chart_dir` - + kubectl delete namespace acceptance --ignore-not-found=true kubectl create namespace acceptance kubectl config set-context --current --namespace=acceptance helm install "$(name_prefix)" \ - --set="injector.replicas=3" . + --wait \ + --timeout=5m \ + --set="injector.replicas=3" \ + --set="injector.leaderElector.useContainer=true" . kubectl wait --for condition=Ready pod -l app.kubernetes.io/name=vault-agent-injector --timeout=5m pods=($(kubectl get pods -l app.kubernetes.io/name=vault-agent-injector -o json | jq -r '.items[] | .metadata.name')) [ "${#pods[@]}" == 3 ] - leader="$(echo "$(kubectl exec ${pods[0]} -c sidecar-injector -- wget --quiet --output-document - localhost:4040)" | jq -r .name)" - # Check the leader name is valid - i.e. 
one of the 3 pods - [[ " ${pods[@]} " =~ " ${leader} " ]] - - # Check every pod agrees on who the leader is - for pod in "${pods[@]}" + leader='' + tries=0 + until [ $tries -ge 60 ] do - pod_leader="$(echo "$(kubectl exec $pod -c sidecar-injector -- wget --quiet --output-document - localhost:4040)" | jq -r .name)" - [ "${pod_leader}" == "${leader}" ] + ## The new internal leader mechanism uses a ConfigMap + owner=$(kubectl get configmaps vault-k8s-leader -o json | jq -r .metadata.ownerReferences\[0\].name) + leader=$(kubectl get pods $owner -o json | jq -r .metadata.name) + [ -n "${leader}" ] && [ "${leader}" != "null" ] && break + + ## Also check the old leader-elector container + old_leader="$(echo "$(kubectl exec ${pods[0]} -c sidecar-injector -- wget --quiet --output-document - localhost:4040)" | jq -r .name)" + [ -n "${old_leader}" ] && break + + ((++tries)) + sleep .5 done + + # Check the leader name is valid - i.e. one of the 3 pods + [[ " ${pods[@]} " =~ " ${leader} " || " ${pods[@]} " =~ " ${old_leader} " ]] + } setup() { diff --git a/test/acceptance/server-ha-enterprise-dr.bats b/test/acceptance/server-ha-enterprise-dr.bats index 1445ffb83..c9a5d1993 100644 --- a/test/acceptance/server-ha-enterprise-dr.bats +++ b/test/acceptance/server-ha-enterprise-dr.bats @@ -7,7 +7,7 @@ load _helpers helm install "$(name_prefix)-east" \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.7.3_ent' \ + --set='server.image.tag=1.8.4_ent' \ --set='injector.enabled=false' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' \ @@ -77,7 +77,7 @@ load _helpers helm install "$(name_prefix)-west" \ --set='injector.enabled=false' \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.7.3_ent' \ + --set='server.image.tag=1.8.4_ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' \ --set='server.enterpriseLicense.secretName=vault-license' . diff --git a/test/acceptance/server-ha-enterprise-perf.bats b/test/acceptance/server-ha-enterprise-perf.bats index 98fc3e6d6..b42bb50d0 100644 --- a/test/acceptance/server-ha-enterprise-perf.bats +++ b/test/acceptance/server-ha-enterprise-perf.bats @@ -8,7 +8,7 @@ load _helpers helm install "$(name_prefix)-east" \ --set='injector.enabled=false' \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.7.3_ent' \ + --set='server.image.tag=1.8.4_ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' \ --set='server.enterpriseLicense.secretName=vault-license' . @@ -77,7 +77,7 @@ load _helpers helm install "$(name_prefix)-west" \ --set='injector.enabled=false' \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.7.3_ent' \ + --set='server.image.tag=1.8.4_ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' \ --set='server.enterpriseLicense.secretName=vault-license' . diff --git a/test/chart/verifier.bats b/test/chart/verifier.bats index e7ab5aa72..63c793951 100644 --- a/test/chart/verifier.bats +++ b/test/chart/verifier.bats @@ -6,10 +6,9 @@ setup_file() { cd `chart_dir` export VERIFY_OUTPUT="/$BATS_RUN_TMPDIR/verify.json" export CHART_VOLUME=vault-helm-chart-src - # Note: currently `latest` is the only tag available in the chart-verifier repo. 
- local IMAGE="quay.io/redhat-certification/chart-verifier:latest" + local IMAGE="quay.io/redhat-certification/chart-verifier:1.2.1" # chart-verifier requires an openshift version if a cluster isn't available - local OPENSHIFT_VERSION="4.7" + local OPENSHIFT_VERSION="4.8" local DISABLED_TESTS="chart-testing" local run_cmd="chart-verifier" @@ -24,7 +23,7 @@ setup_file() { # Make sure we have the latest version of chart-verifier docker pull $IMAGE # Start chart-verifier using this volume - run_cmd="docker run --rm --volumes-from $CHART_VOLUME $IMAGE" + run_cmd="docker run --rm --volumes-from $CHART_VOLUME -w $chart_src $IMAGE" fi $run_cmd verify $chart_src \ @@ -41,46 +40,46 @@ teardown_file() { } @test "has-kubeversion" { - check_result has-kubeversion + check_result v1.0/has-kubeversion } @test "is-helm-v3" { - check_result is-helm-v3 + check_result v1.0/is-helm-v3 } @test "not-contains-crds" { - check_result not-contains-crds + check_result v1.0/not-contains-crds } @test "helm-lint" { - check_result helm-lint + check_result v1.0/helm-lint } @test "not-contain-csi-objects" { - check_result not-contain-csi-objects + check_result v1.0/not-contain-csi-objects } @test "has-readme" { - check_result has-readme + check_result v1.0/has-readme } @test "contains-values" { - check_result contains-values + check_result v1.0/contains-values } @test "contains-values-schema" { - check_result contains-values-schema + check_result v1.0/contains-values-schema } @test "contains-test" { - check_result contains-test + check_result v1.0/contains-test } @test "images-are-certified" { - check_result images-are-certified + check_result v1.0/images-are-certified } @test "chart-testing" { skip "Skipping since this test requires a kubernetes/openshift cluster" - check_result chart-testing + check_result v1.0/chart-testing } diff --git a/test/kind/config.yaml b/test/kind/config.yaml new file mode 100644 index 000000000..250966401 --- /dev/null +++ b/test/kind/config.yaml @@ -0,0 +1,7 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: worker +- role: worker +- role: worker diff --git a/test/terraform/main.tf b/test/terraform/main.tf index 3556c6fd8..5c3570f27 100644 --- a/test/terraform/main.tf +++ b/test/terraform/main.tf @@ -8,7 +8,7 @@ resource "random_id" "suffix" { data "google_container_engine_versions" "main" { location = "${var.zone}" - version_prefix = "1.17." + version_prefix = "1.19." } data "google_service_account" "gcpapi" { diff --git a/test/unit/csi-daemonset.bats b/test/unit/csi-daemonset.bats index f0a62c2b2..5cfd8a7e8 100644 --- a/test/unit/csi-daemonset.bats +++ b/test/unit/csi-daemonset.bats @@ -72,6 +72,33 @@ load _helpers . | tee /dev/stderr | yq -r '.spec.template.spec.imagePullSecrets' | tee /dev/stderr) + local actual=$(echo $object | + yq -r '. | length' | tee /dev/stderr) + [ "${actual}" = "2" ] + + local actual=$(echo $object | + yq -r '.[0].name' | tee /dev/stderr) + [ "${actual}" = "foo" ] + + local actual=$(echo $object | + yq -r '.[1].name' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "csi/daemonset: Custom imagePullSecrets - string array" { + cd `chart_dir` + local object=$(helm template \ + --show-only templates/csi-daemonset.yaml \ + --set "csi.enabled=true" \ + --set 'global.imagePullSecrets[0]=foo' \ + --set 'global.imagePullSecrets[1]=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.imagePullSecrets' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '. 
| length' | tee /dev/stderr) + [ "${actual}" = "2" ] + local actual=$(echo $object | yq -r '.[0].name' | tee /dev/stderr) [ "${actual}" = "foo" ] @@ -246,7 +273,7 @@ load _helpers [ "${actual}" = "true" ] } -@test "csi/daemonset: tolerations can be set" { +@test "csi/daemonset: tolerations can be set as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/csi-daemonset.yaml \ @@ -257,6 +284,17 @@ load _helpers [ "${actual}" = "true" ] } +@test "csi/daemonset: tolerations can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/csi-daemonset.yaml \ + --set 'csi.enabled=true' \ + --set "csi.pod.tolerations[0].foo=bar,csi.pod.tolerations[1].baz=qux" \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations == [{"foo": "bar"}, {"baz": "qux"}]' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # volumes @@ -277,6 +315,68 @@ load _helpers [ "${actual}" = "{}" ] } +@test "csi/daemonset: csi providersDir default" { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + --show-only templates/csi-daemonset.yaml \ + --set 'csi.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "providervol")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.hostPath.path' | tee /dev/stderr) + [ "${actual}" = "/etc/kubernetes/secrets-store-csi-providers" ] +} + +@test "csi/daemonset: csi kubeletRootDir default" { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + --show-only templates/csi-daemonset.yaml \ + --set 'csi.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "mountpoint-dir")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.hostPath.path' | tee /dev/stderr) + [ "${actual}" = "/var/lib/kubelet/pods" ] +} + +@test "csi/daemonset: csi providersDir override " { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + --show-only templates/csi-daemonset.yaml \ + --set 'csi.enabled=true' \ + --set 'csi.daemonSet.providersDir=/alt/csi-prov-dir' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "providervol")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.hostPath.path' | tee /dev/stderr) + [ "${actual}" = "/alt/csi-prov-dir" ] +} + +@test "csi/daemonset: csi kubeletRootDir override" { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + --show-only templates/csi-daemonset.yaml \ + --set 'csi.enabled=true' \ + --set 'csi.daemonSet.kubeletRootDir=/alt/kubelet-root' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "mountpoint-dir")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.hostPath.path' | tee /dev/stderr) + [ "${actual}" = "/alt/kubelet-root/pods" ] +} + #-------------------------------------------------------------------- # volumeMounts diff --git a/test/unit/injector-deployment.bats b/test/unit/injector-deployment.bats index cd6f1ddd3..9a634c301 100755 --- a/test/unit/injector-deployment.bats +++ b/test/unit/injector-deployment.bats @@ -432,7 +432,7 @@ load _helpers [ "${actual}" = "false" ] } -@test "injector/deployment: affinity can be set" { +@test "injector/deployment: affinity can be set as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/injector-deployment.yaml \ @@ -442,6 +442,16 @@ load _helpers [ "${actual}" = "true" ] } +@test "injector/deployment: affinity can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set 'injector.affinity.podAntiAffinity=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.affinity.podAntiAffinity == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # tolerations @@ -454,7 +464,7 @@ load _helpers [ "${actual}" = "true" ] } -@test "injector/deployment: tolerations can be set" { +@test "injector/deployment: tolerations can be set as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/injector-deployment.yaml \ @@ -464,6 +474,16 @@ load _helpers [ "${actual}" = "true" ] } +@test "injector/deployment: tolerations can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set "injector.tolerations[0].foo=bar,injector.tolerations[1].baz=qux" \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations == [{"foo": "bar"}, {"baz": "qux"}]' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # nodeSelector @@ -476,7 +496,7 @@ load _helpers [ "${actual}" = "null" ] } -@test "injector/deployment: nodeSelector can be set" { +@test "injector/deployment: nodeSelector can be set as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/injector-deployment.yaml \ @@ -486,6 +506,17 @@ load _helpers [ "${actual}" = "testing" ] } +@test "injector/deployment: nodeSelector can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set "injector.nodeSelector.beta\.kubernetes\.io/arch=amd64" \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector == {"beta.kubernetes.io/arch": "amd64"}' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + + #-------------------------------------------------------------------- # priorityClassName @@ -640,3 +671,28 @@ load _helpers yq -r 'map(select(.name=="AGENT_INJECT_DEFAULT_TEMPLATE")) | .[] .value' | tee /dev/stderr) [ "${value}" = "json" ] } + +@test "injector/deployment: agent default template_config.exit_on_retry_failure" { + cd `chart_dir` + local object=$(helm template \ + --show-only templates/injector-deployment.yaml \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) + + local value=$(echo $object | + yq -r 'map(select(.name=="AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE")) | .[] .value' | tee /dev/stderr) + [ "${value}" = "true" ] +} + +@test "injector/deployment: can set agent template_config.exit_on_retry_failure" { + cd `chart_dir` + local object=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set='injector.agentDefaults.templateConfig.exitOnRetryFailure=false' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) + + local value=$(echo $object | + yq -r 'map(select(.name=="AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE")) | .[] .value' | tee /dev/stderr) + [ "${value}" = "false" ] +} diff --git a/test/unit/injector-leader-elector.bats b/test/unit/injector-leader-elector.bats index 6c77d81da..75ab2982c 100644 --- a/test/unit/injector-leader-elector.bats +++ b/test/unit/injector-leader-elector.bats @@ -5,7 +5,7 @@ load _helpers #-------------------------------------------------------------------- # Deployment -@test "injector/deployment: leader elector replica count" { +@test "injector/deployment: replica count" { cd `chart_dir` local actual=$(helm template \ --show-only templates/injector-deployment.yaml \ @@ -15,42 +15,6 @@ load _helpers [ "${actual}" = "2" ] } -@test "injector/deployment: leader elector - sidecar is created only when enabled" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers | length' | tee /dev/stderr) - [ "${actual}" = "1" ] - - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set "injector.replicas=2" \ - --set "injector.leaderElector.enabled=false" \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers | length' | tee /dev/stderr) - [ "${actual}" = "1" ] - - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set "injector.replicas=2" \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers | length' | tee /dev/stderr) - [ "${actual}" = "2" ] -} - -@test "injector/deployment: leader elector image name is configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set "injector.replicas=2" \ - --set "injector.leaderElector.image.repository=SomeOtherImage" \ - --set "injector.leaderElector.image.tag=SomeOtherTag" \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].image' | tee /dev/stderr) - [ "${actual}" = "SomeOtherImage:SomeOtherTag" ] -} - @test "injector/deployment: leader elector configuration for sidecar-injector" { cd `chart_dir` local actual=$(helm template \ @@ -80,26 +44,6 @@ load _helpers [ "${actual}" = "metadata.namespace" ] } -@test "injector/deployment: leader elector TTL is configurable" { - cd `chart_dir` - # Default value 60s - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set "injector.replicas=2" \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].args[3]' | tee /dev/stderr) - [ "${actual}" = "--ttl=60s" ] - - # Configured to 30s - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set "injector.replicas=2" \ - --set "injector.leaderElector.ttl=30s" \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].args[3]' | tee /dev/stderr) - [ "${actual}" = "--ttl=30s" ] -} - #-------------------------------------------------------------------- # Resource creation @@ -143,16 +87,16 @@ load _helpers [ "${actual}" = "true" ] } -@test "injector/leader-endpoint: created/skipped as appropriate" { +@test "injector/role: created/skipped as appropriate" { cd `chart_dir` local actual=$( (helm template \ - --show-only templates/injector-leader-endpoint.yaml \ + --show-only templates/injector-role.yaml \ . || echo "---") | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-leader-endpoint.yaml \ + --show-only templates/injector-role.yaml \ --set "injector.replicas=2" \ --set "global.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -160,7 +104,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-leader-endpoint.yaml \ + --show-only templates/injector-role.yaml \ --set "injector.replicas=2" \ --set "injector.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -168,7 +112,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-leader-endpoint.yaml \ + --show-only templates/injector-role.yaml \ --set "injector.replicas=2" \ --set "injector.leaderElector.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -176,23 +120,23 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-leader-endpoint.yaml \ + --show-only templates/injector-role.yaml \ --set "injector.replicas=2" \ . || echo "---") | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "injector/role: created/skipped as appropriate" { +@test "injector/rolebinding: created/skipped as appropriate" { cd `chart_dir` local actual=$( (helm template \ - --show-only templates/injector-role.yaml \ + --show-only templates/injector-rolebinding.yaml \ . || echo "---") | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-role.yaml \ + --show-only templates/injector-rolebinding.yaml \ --set "injector.replicas=2" \ --set "global.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -200,7 +144,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-role.yaml \ + --show-only templates/injector-rolebinding.yaml \ --set "injector.replicas=2" \ --set "injector.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -208,7 +152,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-role.yaml \ + --show-only templates/injector-rolebinding.yaml \ --set "injector.replicas=2" \ --set "injector.leaderElector.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -216,23 +160,87 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-role.yaml \ + --show-only templates/injector-rolebinding.yaml \ --set "injector.replicas=2" \ . 
|| echo "---") | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "injector/rolebinding: created/skipped as appropriate" { +#-------------------------------------------------------------------- +# Old leader-elector container support +# Note: deprecated and will be removed soon + +@test "injector/deployment: leader elector - sidecar is created only when enabled" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set "injector.replicas=2" \ + --set "injector.leaderElector.enabled=false" \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set "injector.replicas=2" \ + --set "injector.leaderElector.useContainer=true" \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers | length' | tee /dev/stderr) + [ "${actual}" = "2" ] +} + +@test "injector/deployment: leader elector image name is configurable" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set "injector.replicas=2" \ + --set "injector.leaderElector.useContainer=true" \ + --set "injector.leaderElector.image.repository=SomeOtherImage" \ + --set "injector.leaderElector.image.tag=SomeOtherTag" \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].image' | tee /dev/stderr) + [ "${actual}" = "SomeOtherImage:SomeOtherTag" ] +} + +@test "injector/deployment: leader elector TTL is configurable" { + cd `chart_dir` + # Default value 60s + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set "injector.replicas=2" \ + --set "injector.leaderElector.useContainer=true" \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args[3]' | tee /dev/stderr) + [ "${actual}" = "--ttl=60s" ] + + # Configured to 30s + local actual=$(helm template \ + --show-only templates/injector-deployment.yaml \ + --set "injector.replicas=2" \ + --set "injector.leaderElector.useContainer=true" \ + --set "injector.leaderElector.ttl=30s" \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args[3]' | tee /dev/stderr) + [ "${actual}" = "--ttl=30s" ] +} + +@test "injector/leader-endpoint: created/skipped as appropriate" { cd `chart_dir` local actual=$( (helm template \ - --show-only templates/injector-rolebinding.yaml \ + --show-only templates/injector-leader-endpoint.yaml \ . || echo "---") | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-rolebinding.yaml \ + --show-only templates/injector-leader-endpoint.yaml \ --set "injector.replicas=2" \ --set "global.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -240,7 +248,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-rolebinding.yaml \ + --show-only templates/injector-leader-endpoint.yaml \ --set "injector.replicas=2" \ --set "injector.enabled=false" \ . 
|| echo "---") | tee /dev/stderr | @@ -248,7 +256,7 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-rolebinding.yaml \ + --show-only templates/injector-leader-endpoint.yaml \ --set "injector.replicas=2" \ --set "injector.leaderElector.enabled=false" \ . || echo "---") | tee /dev/stderr | @@ -256,9 +264,10 @@ load _helpers [ "${actual}" = "false" ] local actual=$( (helm template \ - --show-only templates/injector-rolebinding.yaml \ + --show-only templates/injector-leader-endpoint.yaml \ --set "injector.replicas=2" \ + --set "injector.leaderElector.useContainer=true" \ . || echo "---") | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] -} \ No newline at end of file +} diff --git a/test/unit/injector-mutating-webhook.bats b/test/unit/injector-mutating-webhook.bats index 65f505bcf..1e6e150d0 100755 --- a/test/unit/injector-mutating-webhook.bats +++ b/test/unit/injector-mutating-webhook.bats @@ -121,3 +121,35 @@ load _helpers [ "${actual}" = "\"Fail\"" ] } + +#-------------------------------------------------------------------- +# annotations + +@test "injector/MutatingWebhookConfiguration: default annotations" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-mutating-webhook.yaml \ + . | tee /dev/stderr | + yq -r '.metadata.annotations' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "injector/MutatingWebhookConfiguration: specify annotations yaml" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-mutating-webhook.yaml \ + --set 'injector.webhookAnnotations.foo=bar' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "injector/MutatingWebhookConfiguration: specify annotations yaml string" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/injector-mutating-webhook.yaml \ + --set 'injector.webhookAnnotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} diff --git a/test/unit/server-ha-active-service.bats b/test/unit/server-ha-active-service.bats index be3060d64..a835c9d9c 100755 --- a/test/unit/server-ha-active-service.bats +++ b/test/unit/server-ha-active-service.bats @@ -157,3 +157,43 @@ load _helpers yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr) [ "${actual}" = "https" ] } + +# duplicated in server-service.bats +@test "server/ha-active-Service: NodePort assert externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-ha-active-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=NodePort' \ + --set 'server.service.externalTrafficPolicy=Foo' \ + . | tee /dev/stderr | + yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "Foo" ] +} + +# duplicated in server-service.bats +@test "server/ha-active-Service: NodePort assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-ha-active-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=NodePort' \ + --set 'server.service.externalTrafficPolicy=' \ + . 
| tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +# duplicated in server-service.bats +@test "server/ha-active-Service: ClusterIP assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-ha-active-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=ClusterIP' \ + --set 'server.service.externalTrafficPolicy=Foo' \ + . | tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + diff --git a/test/unit/server-ha-standby-service.bats b/test/unit/server-ha-standby-service.bats index e164cde1c..7dfd5d7fd 100755 --- a/test/unit/server-ha-standby-service.bats +++ b/test/unit/server-ha-standby-service.bats @@ -168,3 +168,43 @@ load _helpers yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr) [ "${actual}" = "https" ] } + +# duplicated in server-service.bats +@test "server/ha-standby-Service: NodePort assert externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-ha-standby-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=NodePort' \ + --set 'server.service.externalTrafficPolicy=Foo' \ + . | tee /dev/stderr | + yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "Foo" ] +} + +# duplicated in server-service.bats +@test "server/ha-standby-Service: NodePort assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-ha-standby-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=NodePort' \ + --set 'server.service.externalTrafficPolicy=' \ + . | tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +# duplicated in server-service.bats +@test "server/ha-standby-Service: ClusterIP assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-ha-standby-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=ClusterIP' \ + --set 'server.service.externalTrafficPolicy=Foo' \ + . | tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + diff --git a/test/unit/server-ha-statefulset.bats b/test/unit/server-ha-statefulset.bats index 43e1acef2..cc77e7e39 100755 --- a/test/unit/server-ha-statefulset.bats +++ b/test/unit/server-ha-statefulset.bats @@ -571,7 +571,7 @@ load _helpers [ "${actual}" = "null" ] } -@test "server/ha-StatefulSet: specified nodeSelector" { +@test "server/ha-StatefulSet: specified nodeSelector as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/server-statefulset.yaml \ @@ -582,6 +582,17 @@ load _helpers [ "${actual}" = "testing" ] } +@test "server/ha-StatefulSet: nodeSelector can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-statefulset.yaml \ + --set 'server.ha.enabled=true' \ + --set "server.nodeSelector.beta\.kubernetes\.io/arch=amd64" \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.nodeSelector == {"beta.kubernetes.io/arch": "amd64"}' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Security Contexts @test "server/ha-StatefulSet: uid default" { diff --git a/test/unit/server-ingress.bats b/test/unit/server-ingress.bats index bf191c3a2..4132c16a7 100755 --- a/test/unit/server-ingress.bats +++ b/test/unit/server-ingress.bats @@ -52,7 +52,7 @@ load _helpers --set 'server.ingress.hosts[0].host=test.com' \ --set 'server.ingress.hosts[0].paths[0]=/' \ . | tee /dev/stderr | - yq -r '.spec.rules[0].http.paths[0].backend.serviceName | length > 0' | tee /dev/stderr) + yq -r '.spec.rules[0].http.paths[0].backend.service.name | length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -66,9 +66,9 @@ load _helpers --set 'server.ingress.hosts[0].host=test.com' \ --set 'server.ingress.hosts[0].paths[0]=/' \ --set 'server.ingress.extraPaths[0].path=/annotation-service' \ - --set 'server.ingress.extraPaths[0].backend.serviceName=ssl-redirect' \ + --set 'server.ingress.extraPaths[0].backend.service.name=ssl-redirect' \ . | tee /dev/stderr | - yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr) + yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr) [ "${actual}" = 'ssl-redirect' ] local actual=$(helm template \ @@ -77,7 +77,7 @@ load _helpers --set 'server.ingress.hosts[0].host=test.com' \ --set 'server.ingress.hosts[0].paths[0]=/' \ --set 'server.ingress.extraPaths[0].path=/annotation-service' \ - --set 'server.ingress.extraPaths[0].backend.serviceName=ssl-redirect' \ + --set 'server.ingress.extraPaths[0].backend.service.name=ssl-redirect' \ . | tee /dev/stderr | yq -r '.spec.rules[0].http.paths[0].path' | tee /dev/stderr) [ "${actual}" = '/annotation-service' ] @@ -88,7 +88,7 @@ load _helpers --set 'server.ingress.hosts[0].host=test.com' \ --set 'server.ingress.hosts[0].paths[0]=/' \ --set 'server.ingress.extraPaths[0].path=/annotation-service' \ - --set 'server.ingress.extraPaths[0].backend.serviceName=ssl-redirect' \ + --set 'server.ingress.extraPaths[0].backend.service.name=ssl-redirect' \ . | tee /dev/stderr | yq -r '.spec.rules[0].http.paths[1].path' | tee /dev/stderr) [ "${actual}" = '/' ] @@ -131,7 +131,30 @@ load _helpers [ "${actual}" = "nginx" ] } -@test "server/ingress: uses active service when ha - yaml" { +@test "server/ingress: ingressClassName added to object spec - string" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + --set server.ingress.ingressClassName=nginx \ + . | tee /dev/stderr | + yq -r '.spec.ingressClassName' | tee /dev/stderr) + [ "${actual}" = "nginx" ] +} + +@test "server/ingress: ingressClassName is not added by default" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.ingressClassName' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/ingress: uses active service when ha by default - yaml" { cd `chart_dir` local actual=$(helm template \ @@ -141,10 +164,25 @@ load _helpers --set 'server.ha.enabled=true' \ --set 'server.service.enabled=true' \ . 
| tee /dev/stderr | - yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr) + yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr) [ "${actual}" = "RELEASE-NAME-vault-active" ] } +@test "server/ingress: uses regular service when configured with ha - yaml" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + --set 'server.ingress.activeService=false' \ + --set 'server.dev.enabled=false' \ + --set 'server.ha.enabled=true' \ + --set 'server.service.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr) + [ "${actual}" = "RELEASE-NAME-vault" ] +} + @test "server/ingress: uses regular service when not ha - yaml" { cd `chart_dir` @@ -155,6 +193,75 @@ load _helpers --set 'server.ha.enabled=false' \ --set 'server.service.enabled=true' \ . | tee /dev/stderr | + yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr) + [ "${actual}" = "RELEASE-NAME-vault" ] +} + +@test "server/ingress: k8s 1.18.3 uses regular service when not ha - yaml" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + --set 'server.dev.enabled=false' \ + --set 'server.ha.enabled=false' \ + --set 'server.service.enabled=true' \ + --kube-version 1.18.3 \ + . | tee /dev/stderr | yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr) [ "${actual}" = "RELEASE-NAME-vault" ] -} \ No newline at end of file +} + +@test "server/ingress: uses regular service when not ha and activeService is true - yaml" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + --set 'server.ingress.activeService=true' \ + --set 'server.dev.enabled=false' \ + --set 'server.ha.enabled=false' \ + --set 'server.service.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr) + [ "${actual}" = "RELEASE-NAME-vault" ] +} + +@test "server/ingress: pathType is added to Kubernetes version == 1.19.0" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + --set server.ingress.pathType=ImplementationSpecific \ + --kube-version 1.19.0 \ + . | tee /dev/stderr | + yq -r '.spec.rules[0].http.paths[0].pathType' | tee /dev/stderr) + [ "${actual}" = "ImplementationSpecific" ] +} + +@test "server/ingress: pathType is not added to Kubernetes versions < 1.19" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + --set server.ingress.pathType=ImplementationSpecific \ + --kube-version 1.18.3 \ + . | tee /dev/stderr | + yq -r '.spec.rules[0].http.paths[0].pathType' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/ingress: pathType is added to Kubernetes versions > 1.19" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-ingress.yaml \ + --set 'server.ingress.enabled=true' \ + --set server.ingress.pathType=Prefix \ + --kube-version 1.20.0 \ + . 
| tee /dev/stderr | + yq -r '.spec.rules[0].http.paths[0].pathType' | tee /dev/stderr) + [ "${actual}" = "Prefix" ] +} diff --git a/test/unit/server-route.bats b/test/unit/server-route.bats index f5830e6dd..d141fb635 100755 --- a/test/unit/server-route.bats +++ b/test/unit/server-route.bats @@ -102,7 +102,20 @@ load _helpers [ "${actual}" = "RELEASE-NAME-vault" ] } -@test "server/route: OpenShift - route points to active service by when HA" { +@test "server/route: OpenShift - route points to main service when not ha and activeService is true" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-route.yaml \ + --set 'global.openshift=true' \ + --set 'server.route.enabled=true' \ + --set 'server.route.activeService=true' \ + . | tee /dev/stderr | + yq -r '.spec.to.name' | tee /dev/stderr) + [ "${actual}" = "RELEASE-NAME-vault" ] +} + +@test "server/route: OpenShift - route points to active service when HA by default" { cd `chart_dir` local actual=$(helm template \ @@ -114,3 +127,17 @@ load _helpers yq -r '.spec.to.name' | tee /dev/stderr) [ "${actual}" = "RELEASE-NAME-vault-active" ] } + +@test "server/route: OpenShift - route points to general service when configured with HA" { + cd `chart_dir` + + local actual=$(helm template \ + --show-only templates/server-route.yaml \ + --set 'global.openshift=true' \ + --set 'server.route.enabled=true' \ + --set 'server.route.activeService=false' \ + --set 'server.ha.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.to.name' | tee /dev/stderr) + [ "${actual}" = "RELEASE-NAME-vault" ] +} diff --git a/test/unit/server-service.bats b/test/unit/server-service.bats index 7922f0ff3..4695f2fff 100755 --- a/test/unit/server-service.bats +++ b/test/unit/server-service.bats @@ -384,3 +384,43 @@ load _helpers yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr) [ "${actual}" = "https" ] } + +# duplicated in server-ha-active-service.bats +@test "server/Service: NodePort assert externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=NodePort' \ + --set 'server.service.externalTrafficPolicy=Foo' \ + . | tee /dev/stderr | + yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "Foo" ] +} + +# duplicated in server-ha-active-service.bats +@test "server/Service: NodePort assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=NodePort' \ + --set 'server.service.externalTrafficPolicy=' \ + . | tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +# duplicated in server-ha-active-service.bats +@test "server/Service: ClusterIP assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-service.yaml \ + --set 'server.ha.enabled=true' \ + --set 'server.service.type=ClusterIP' \ + --set 'server.service.externalTrafficPolicy=Foo' \ + . | tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + diff --git a/test/unit/server-statefulset.bats b/test/unit/server-statefulset.bats index d7edb969d..b93905188 100755 --- a/test/unit/server-statefulset.bats +++ b/test/unit/server-statefulset.bats @@ -146,6 +146,32 @@ load _helpers .
| tee /dev/stderr | yq -r '.spec.template.spec.imagePullSecrets' | tee /dev/stderr) + local actual=$(echo $object | + yq -r '. | length' | tee /dev/stderr) + [ "${actual}" = "2" ] + + local actual=$(echo $object | + yq -r '.[0].name' | tee /dev/stderr) + [ "${actual}" = "foo" ] + + local actual=$(echo $object | + yq -r '.[1].name' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "server/standalone-StatefulSet: Custom imagePullSecrets - string array" { + cd `chart_dir` + local object=$(helm template \ + --show-only templates/server-statefulset.yaml \ + --set 'global.imagePullSecrets[0]=foo' \ + --set 'global.imagePullSecrets[1]=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.imagePullSecrets' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '. | length' | tee /dev/stderr) + [ "${actual}" = "2" ] + local actual=$(echo $object | yq -r '.[0].name' | tee /dev/stderr) [ "${actual}" = "foo" ] @@ -738,7 +764,7 @@ load _helpers [ "${actual}" = "true" ] } -@test "server/standalone-StatefulSet: affinity can be set" { +@test "server/standalone-StatefulSet: affinity can be set as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/server-statefulset.yaml \ @@ -748,6 +774,17 @@ load _helpers [ "${actual}" = "true" ] } +@test "server/standalone-StatefulSet: affinity can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-statefulset.yaml \ + --set 'server.affinity.podAntiAffinity=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.affinity.podAntiAffinity == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + + @test "server/standalone-StatefulSet: tolerations not set by default" { cd `chart_dir` local actual=$(helm template \ @@ -757,7 +794,7 @@ load _helpers [ "${actual}" = "true" ] } -@test "server/standalone-StatefulSet: tolerations can be set" { +@test "server/standalone-StatefulSet: tolerations can be set as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/server-statefulset.yaml \ @@ -767,6 +804,16 @@ load _helpers [ "${actual}" = "true" ] } +@test "server/standalone-StatefulSet: tolerations can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-statefulset.yaml \ + --set "server.tolerations[0].foo=bar,server.tolerations[1].baz=qux" \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations == [{"foo": "bar"}, {"baz": "qux"}]' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "server/standalone-StatefulSet: nodeSelector is not set by default" { cd `chart_dir` local actual=$(helm template \ @@ -776,7 +823,7 @@ load _helpers [ "${actual}" = "null" ] } -@test "server/standalone-StatefulSet: specified nodeSelector" { +@test "server/standalone-StatefulSet: specified nodeSelector as string" { cd `chart_dir` local actual=$(helm template \ --show-only templates/server-statefulset.yaml \ @@ -786,6 +833,16 @@ load _helpers [ "${actual}" = "testing" ] } +@test "server/standalone-StatefulSet: nodeSelector can be set as YAML" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/server-statefulset.yaml \ + --set "server.nodeSelector.beta\.kubernetes\.io/arch=amd64" \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.nodeSelector == {"beta.kubernetes.io/arch": "amd64"}' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # extraInitContainers diff --git a/test/unit/ui-service.bats b/test/unit/ui-service.bats index 9dade3db3..f439aecfb 100755 --- a/test/unit/ui-service.bats +++ b/test/unit/ui-service.bats @@ -135,6 +135,16 @@ load _helpers . | tee /dev/stderr | yq -r '.spec.type' | tee /dev/stderr) [ "${actual}" = "LoadBalancer" ] + + local actual=$(helm template \ + --show-only templates/ui-service.yaml \ + --set 'server.standalone.enabled=true' \ + --set 'ui.serviceType=LoadBalancer' \ + --set 'ui.externalTrafficPolicy=Local' \ + --set 'ui.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "Local" ] } @test "ui/Service: LoadBalancerIP set if specified and serviceType == LoadBalancer" { @@ -183,6 +193,19 @@ load _helpers [ "${actual}" = "null" ] } +@test "ui/Service: ClusterIP assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/ui-service.yaml \ + --set 'server.standalone.enabled=true' \ + --set 'ui.serviceType=ClusterIP' \ + --set 'ui.externalTrafficPolicy=Foo' \ + --set 'ui.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + @test "ui/Service: specify annotations" { cd `chart_dir` local actual=$(helm template \ @@ -323,3 +346,31 @@ load _helpers yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) [ "${actual}" = "123" ] } + +@test "ui/Service: LoadBalancer assert externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/ui-service.yaml \ + --set 'ui.enabled=true' \ + --set 'server.standalone.enabled=true' \ + --set 'ui.serviceType=LoadBalancer' \ + --set 'ui.externalTrafficPolicy=Foo' \ + . | tee /dev/stderr | + yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "Foo" ] +} + +@test "ui/Service: LoadBalancer assert no externalTrafficPolicy" { + cd `chart_dir` + local actual=$(helm template \ + --show-only templates/ui-service.yaml \ + --set 'ui.enabled=true' \ + --set 'server.standalone.enabled=true' \ + --set 'ui.serviceType=LoadBalancer' \ + --set 'ui.externalTrafficPolicy=' \ + . 
| tee /dev/stderr | + yq '.spec.externalTrafficPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] + +} + diff --git a/values.openshift.yaml b/values.openshift.yaml index 96198fe9b..4db41c2e2 100644 --- a/values.openshift.yaml +++ b/values.openshift.yaml @@ -6,13 +6,13 @@ global: injector: image: repository: "registry.connect.redhat.com/hashicorp/vault-k8s" - tag: "0.10.2-ubi" + tag: "0.14.0-ubi" agentImage: repository: "registry.connect.redhat.com/hashicorp/vault" - tag: "1.7.3-ubi" + tag: "1.8.4-ubi" server: image: repository: "registry.connect.redhat.com/hashicorp/vault" - tag: "1.7.3-ubi" + tag: "1.8.4-ubi" diff --git a/values.schema.json b/values.schema.json index db3b80679..4ddbedfaf 100644 --- a/values.schema.json +++ b/values.schema.json @@ -24,6 +24,12 @@ "type": "string" } } + }, + "providersDir": { + "type": "string" + }, + "kubeletRootDir": { + "type": "string" } } }, @@ -80,7 +86,11 @@ ] }, "tolerations": { - "type": ["null", "string"] + "type": [ + "null", + "array", + "string" + ] } } }, @@ -167,7 +177,10 @@ "type": "object", "properties": { "affinity": { - "type": "string" + "type": [ + "object", + "string" + ] }, "agentDefaults": { "type": "object", @@ -186,6 +199,14 @@ }, "template": { "type": "string" + }, + "templateConfig": { + "type": "object", + "properties": { + "exitOnRetryFailure": { + "type": "boolean" + } + } } } }, @@ -280,6 +301,9 @@ }, "ttl": { "type": "string" + }, + "useContainer": { + "type": "boolean" } } }, @@ -301,7 +325,11 @@ "type": "object" }, "nodeSelector": { - "type": ["null", "string"] + "type": [ + "null", + "object", + "string" + ] }, "objectSelector": { "type": "object" @@ -335,6 +363,13 @@ "tolerations": { "type": [ "null", + "array", + "string" + ] + }, + "webhookAnnotations": { + "type": [ + "object", "string" ] } @@ -344,7 +379,10 @@ "type": "object", "properties": { "affinity": { - "type": "string" + "type": [ + "object", + "string" + ] }, "annotations": { "type": [ @@ -541,6 +579,9 @@ "ingress": { "type": "object", "properties": { + "activeService": { + "type": "boolean" + }, "annotations": { "type": [ "object", @@ -567,6 +608,9 @@ } } }, + "ingressClassName": { + "type": "string" + }, "labels": { "type": "object" }, @@ -621,6 +665,7 @@ "nodeSelector": { "type": [ "null", + "object", "string" ] }, @@ -662,6 +707,9 @@ "route": { "type": "object", "properties": { + "activeService": { + "type": "boolean" + }, "annotations": { "type": [ "object", @@ -747,6 +795,7 @@ "tolerations": { "type": [ "null", + "array", "string" ] }, diff --git a/values.yaml b/values.yaml index a46a82868..845761a30 100644 --- a/values.yaml +++ b/values.yaml @@ -5,6 +5,7 @@ global: # will enable or disable all the components within this chart by default. enabled: true # Image pull secret to use for registry authentication. + # Alternatively, the value may be specified as an array of strings. imagePullSecrets: [] # imagePullSecrets: # - name: image-pull-secret @@ -32,10 +33,16 @@ injector: # Configures the port the injector should listen on port: 8080 - # If multiple replicas are specified, by default a leader-elector side-car - # will be created so that only one injector attempts to create TLS certificates. + # If multiple replicas are specified, by default a leader will be determined + # so that only one injector attempts to create TLS certificates. leaderElector: enabled: true + # Note: The deployment of the leader-elector container will soon be removed + # from this chart since vault-k8s now uses an internal mechanism to + # determine leadership. 
+ # To enable the deployment of the leader-elector container for use with + # vault-k8s 0.12.0 and earlier, set `useContainer=true` + useContainer: false image: repository: "gcr.io/google_containers/leader-elector" tag: "0.4" @@ -52,15 +59,15 @@ injector: # image sets the repo and tag of the vault-k8s image to use for the injector. image: repository: "hashicorp/vault-k8s" - tag: "0.10.2" + tag: "0.14.0" pullPolicy: IfNotPresent # agentImage sets the repo and tag of the Vault image to use for the Vault Agent # containers. This should be set to the official Vault image. Vault 1.3.1+ is # required. agentImage: - repository: "vault" - tag: "1.7.3" + repository: "hashicorp/vault" + tag: "1.8.4" # The default values for the injected Vault Agent containers. agentDefaults: @@ -75,6 +82,10 @@ injector: # Possible values include: "json" and "map". template: "map" + # Default values within Agent's template_config stanza. + templateConfig: + exitOnRetryFailure: true + # Mount Path of the Vault Kubernetes Auth Method. authPath: "auth/kubernetes" @@ -114,6 +125,9 @@ injector: # failurePolicy: Ignore + # Extra annotations to attach to the webhook + webhookAnnotations: {} + certs: # secretName is the name of the secret that has the TLS certificate and # private key to serve the injector webhook. If this is null, then the @@ -121,9 +135,10 @@ injector: # a service account to the injector to generate its own certificates. secretName: null - # caBundle is a base64-encoded PEM-encoded certificate bundle for the - # CA that signed the TLS certificate that the webhook serves. This must - # be set if secretName is non-null. + # caBundle is a base64-encoded PEM-encoded certificate bundle for the CA + # that signed the TLS certificate that the webhook serves. This must be set + # if secretName is non-null, unless an external service like cert-manager is + # keeping the caBundle updated. caBundle: "" # certName and keyName are the names of the files within the secret for @@ -147,8 +162,7 @@ injector: # KUBERNETES_SERVICE_HOST: kubernetes.default.svc # Affinity Settings for injector pods - # This should be a multi-line string matching the affinity section of a - # PodSpec. + # This can either be a multi-line string or YAML matching the PodSpec's affinity field. # Commenting out or setting as empty the affinity variable, will allow # deployment of multiple replicas to single node services such as Minikube. affinity: | @@ -162,16 +176,16 @@ injector: topologyKey: kubernetes.io/hostname # Toleration Settings for injector pods - # This should be a multi-line string matching the Toleration array + # This should be either a multi-line string or YAML matching the Toleration array # in a PodSpec. - tolerations: null + tolerations: [] - # nodeSelector labels for injector pod assignment, formatted as a muli-line string. + # nodeSelector labels for injector pod assignment, formatted as a multi-line string or YAML map. # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector # Example: - # nodeSelector: | + # nodeSelector: # beta.kubernetes.io/arch: amd64 - nodeSelector: null + nodeSelector: {} # Priority class for injector pods priorityClassName: "" @@ -215,8 +229,8 @@ server: # By default no direct resource request is made.
image: - repository: "vault" - tag: "1.7.3" + repository: "hashicorp/vault" + tag: "1.8.4" # Overrides the default Image Pull Policy pullPolicy: IfNotPresent @@ -255,6 +269,18 @@ server: # or # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" + + # Optionally use ingressClassName instead of deprecated annotation. + # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation + ingressClassName: "" + + # As of Kubernetes 1.19, all Ingress Paths must have a pathType configured. The default value below should be sufficient in most cases. + # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for other possible values. + pathType: Prefix + + # When HA mode is enabled and K8s service registration is being used, + # configure the ingress to point to the Vault active service. + activeService: true hosts: - host: chart-example.local paths: [] @@ -262,8 +288,10 @@ server: extraPaths: [] # - path: /* # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation + # service: + # name: ssl-redirect + # port: + # number: use-annotation tls: [] # - secretName: chart-example-tls # hosts: @@ -273,6 +301,11 @@ server: # The created route will be of type passthrough route: enabled: false + + # When HA mode is enabled and K8s service registration is being used, + # configure the route to point to the Vault active service. + activeService: true + labels: {} annotations: {} host: chart-example.local @@ -392,10 +425,10 @@ server: # name: plugins # readOnly: true - # Affinity Settings # Commenting out or setting as empty the affinity variable, will allow # deployment to single node services such as Minikube + # This should be either a multi-line string or YAML matching the PodSpec's affinity field. affinity: | podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -407,16 +440,16 @@ server: topologyKey: kubernetes.io/hostname # Toleration Settings for server pods - # This should be a multi-line string matching the Toleration array + # This should be either a multi-line string or YAML matching the Toleration array # in a PodSpec. - tolerations: null + tolerations: [] - # nodeSelector labels for server pod assignment, formatted as a muli-line string. + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector # Example: - # nodeSelector: | + # nodeSelector: # beta.kubernetes.io/arch: amd64 - nodeSelector: null + nodeSelector: {} # Enables network policy for server pods networkPolicy: @@ -457,6 +490,12 @@ server: # NodePort, or LoadBalancer. #type: ClusterIP + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + # If type is set to "NodePort", a specific nodePort value can be configured, # will be random if left blank. #nodePort: 30000 @@ -686,7 +725,13 @@ ui: externalPort: 8200 targetPort: 8200 - # loadBalancerSourceRanges: + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster.
+ # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + #loadBalancerSourceRanges: # - 10.0.0.0/16 # - 1.78.23.3/32 @@ -748,6 +793,10 @@ csi: # YAML-formatted multi-line templated string map of the annotations to apply # to the daemonSet. annotations: {} + # Provider host path (must match the CSI provider's path) + providersDir: "/etc/kubernetes/secrets-store-csi-providers" + # Kubelet host path + kubeletRootDir: "/var/lib/kubelet" pod: # Extra annotations for the provider pods. This can either be YAML or a @@ -756,9 +805,9 @@ csi: annotations: {} # Toleration Settings for provider pods - # This should be a multi-line string matching the Toleration array + # This should be either a multi-line string or YAML matching the Toleration array # in a PodSpec. - tolerations: null + tolerations: [] serviceAccount: # Extra annotations for the serviceAccount definition. This can either be