From 83a4ef15dfda6403862437f8acf3606f255e888a Mon Sep 17 00:00:00 2001
From: Peeyush Gupta
Date: Tue, 19 Nov 2019 08:58:58 -0500
Subject: [PATCH 01/10] Adding build for ppc64le

---
 build.make | 1 +
 1 file changed, 1 insertion(+)

diff --git a/build.make b/build.make
index 1b6f35fe1..7075a37ee 100644
--- a/build.make
+++ b/build.make
@@ -70,6 +70,7 @@ build-%: check-go-version-go
 	CGO_ENABLED=0 GOOS=linux go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$*
 	if [ "$$ARCH" = "amd64" ]; then \
 		CGO_ENABLED=0 GOOS=windows go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*.exe ./cmd/$* ; \
+		CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*-ppc64le ./cmd/$* ; \
 	fi

 container-%: build-%

From 1eaaaa1cb49713e8e4af87d009c24d310cf66260 Mon Sep 17 00:00:00 2001
From: Mucahit Kurt
Date: Sat, 16 Nov 2019 06:23:08 +0300
Subject: [PATCH 02/10] Delete kind cluster after tests run.

Inside a real Prow job it is better to clean up at runtime instead of
leaving that to the Prow job cleanup code because the latter sometimes
times out.

Signed-off-by: Mucahit Kurt
---
 prow.sh | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/prow.sh b/prow.sh
index 3b9621a21..8b431c564 100755
--- a/prow.sh
+++ b/prow.sh
@@ -580,6 +580,19 @@ EOF
     export KUBECONFIG
 }

+# Deletes kind cluster inside a prow job
+delete_cluster_inside_prow_job() {
+    # Inside a real Prow job it is better to clean up at runtime
+    # instead of leaving that to the Prow job cleanup code
+    # because the latter sometimes times out (https://github.com/kubernetes-csi/csi-release-tools/issues/24#issuecomment-554765872).
+    if [ "$JOB_NAME" ]; then
+        if kind get clusters | grep -q csi-prow; then
+            run kind delete cluster --name=csi-prow || die "kind delete failed"
+        fi
+        unset KUBECONFIG
+    fi
+}
+
 # Looks for the deployment as specified by CSI_PROW_DEPLOYMENT and CSI_PROW_KUBERNETES_VERSION
 # in the given directory.
 find_deployment () {
@@ -1017,6 +1030,7 @@ main () {
                 fi
             fi
         fi
+        delete_cluster_inside_prow_job
     fi

     if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then
@@ -1047,6 +1061,7 @@ main () {
                 fi
             fi
         fi
+        delete_cluster_inside_prow_job
     fi
 fi

From 003c14b2d4ae3b1463db5e5b3ff91f39b03f5ba8 Mon Sep 17 00:00:00 2001
From: Grant Griffiths
Date: Mon, 11 Nov 2019 23:49:42 -0800
Subject: [PATCH 03/10] Add snapshotter CRDs after cluster setup

Signed-off-by: Grant Griffiths
---
 prow.sh | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/prow.sh b/prow.sh
index 3b9621a21..bc9c9f03b 100755
--- a/prow.sh
+++ b/prow.sh
@@ -322,6 +322,9 @@ configvar CSI_PROW_E2E_ALPHA_GATES_1_16 'VolumeSnapshotDataSource=true' "alpha f
 configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true' "alpha feature gates for latest Kubernetes"
 configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"

+# Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment
+configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0-rc4' "external-snapshotter version tag"
+
 # Some tests are known to be unusable in a KinD cluster. For example,
 # stopping kubelet with "ssh systemctl stop kubelet" simply
 # doesn't work.
Such tests should be written in a way that they verify @@ -657,6 +660,59 @@ install_hostpath () { fi } +# Installs all nessesary snapshotter CRDs +install_snapshot_crds() { + # Wait until volumesnapshot CRDs are in place. + CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/config/crd" + kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotclasses.yaml" --validate=false + kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshots.yaml" --validate=false + kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotcontents.yaml" --validate=false + cnt=0 + until kubectl get volumesnapshotclasses.snapshot.storage.k8s.io \ + && kubectl get volumesnapshots.snapshot.storage.k8s.io \ + && kubectl get volumesnapshotcontents.snapshot.storage.k8s.io; do + if [ $cnt -gt 30 ]; then + echo >&2 "ERROR: snapshot CRDs not ready after over 1 min" + exit 1 + fi + echo "$(date +%H:%M:%S)" "waiting for snapshot CRDs, attempt #$cnt" + cnt=$((cnt + 1)) + sleep 2 + done +} + +# Install snapshot controller and associated RBAC, retrying until the pod is running. +install_snapshot_controller() { + kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml" + cnt=0 + until kubectl get clusterrolebinding snapshot-controller-role; do + if [ $cnt -gt 30 ]; then + echo "Cluster role bindings:" + kubectl describe clusterrolebinding + echo >&2 "ERROR: snapshot controller RBAC not ready after over 5 min" + exit 1 + fi + echo "$(date +%H:%M:%S)" "waiting for snapshot RBAC setup complete, attempt #$cnt" + cnt=$((cnt + 1)) + sleep 10 + done + + + kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml" + cnt=0 + until kubectl get statefulset snapshot-controller | grep snapshot-controller | grep "1/1"; do + if [ $cnt -gt 30 ]; then + echo "Running statefulsets:" + kubectl describe statefulsets + echo >&2 "ERROR: snapshot controller not ready after over 5 min" + exit 1 + fi + echo "$(date +%H:%M:%S)" "waiting for snapshot controller deployment to complete, attempt #$cnt" + cnt=$((cnt + 1)) + sleep 10 + done +} + # collect logs and cluster status (like the version of all components, Kubernetes version, test version) collect_cluster_info () { cat < Date: Tue, 17 Sep 2019 18:50:50 -0700 Subject: [PATCH 04/10] Add snapshot beta CRD deployment for 1.17 --- deploy/kubernetes-1.17/README.md | 7 + deploy/kubernetes-1.17/deploy-hostpath.sh | 188 +++++++++++++++++ .../hostpath/csi-hostpath-attacher.yaml | 55 +++++ .../hostpath/csi-hostpath-plugin.yaml | 138 +++++++++++++ .../hostpath/csi-hostpath-provisioner.yaml | 55 +++++ .../hostpath/csi-hostpath-snapshotter.yaml | 57 ++++++ .../hostpath/csi-hostpath-testing.yaml | 59 ++++++ deploy/kubernetes-1.17/rbac-snapshotter.yaml | 98 +++++++++ ....storage.k8s.io_volumesnapshotclasses.yaml | 74 +++++++ ...storage.k8s.io_volumesnapshotcontents.yaml | 193 ++++++++++++++++++ ...apshot.storage.k8s.io_volumesnapshots.yaml | 146 +++++++++++++ .../csi-hostpath-snapshotclass.yaml | 6 + 12 files changed, 1076 insertions(+) create mode 100644 deploy/kubernetes-1.17/README.md create mode 100755 deploy/kubernetes-1.17/deploy-hostpath.sh create mode 100644 deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml create mode 100644 
deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml create mode 100644 deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml create mode 100644 deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml create mode 100644 deploy/kubernetes-1.17/hostpath/csi-hostpath-testing.yaml create mode 100644 deploy/kubernetes-1.17/rbac-snapshotter.yaml create mode 100644 deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml create mode 100644 deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml create mode 100644 deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml create mode 100644 deploy/kubernetes-1.17/snapshotter/csi-hostpath-snapshotclass.yaml diff --git a/deploy/kubernetes-1.17/README.md b/deploy/kubernetes-1.17/README.md new file mode 100644 index 000000000..c5b4c0f20 --- /dev/null +++ b/deploy/kubernetes-1.17/README.md @@ -0,0 +1,7 @@ +The deployment for Kubernetes 1.15 uses CSI 1.0 and thus is +incompatible with Kubernetes < 1.13. + +The sidecars depend on 1.15 API changes for migration and resizing, +and 1.14 API changes for CSIDriver and CSINode. +However the hostpath driver doesn't use those features, so this +deployment can work on older Kubernetes versions. diff --git a/deploy/kubernetes-1.17/deploy-hostpath.sh b/deploy/kubernetes-1.17/deploy-hostpath.sh new file mode 100755 index 000000000..0bfdce469 --- /dev/null +++ b/deploy/kubernetes-1.17/deploy-hostpath.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash + +# This script captures the steps required to successfully +# deploy the hostpath plugin driver. This should be considered +# authoritative and all updates for this process should be +# done here and referenced elsewhere. + +# The script assumes that kubectl is available on the OS path +# where it is executed. + +set -e +set -o pipefail +set -x + +BASE_DIR=$(dirname "$0") + +# If set, the following env variables override image registry and/or tag for each of the images. +# They are named after the image name, with hyphen replaced by underscore and in upper case. +# +# - CSI_ATTACHER_REGISTRY +# - CSI_ATTACHER_TAG +# - CSI_NODE_DRIVER_REGISTRAR_REGISTRY +# - CSI_NODE_DRIVER_REGISTRAR_TAG +# - CSI_PROVISIONER_REGISTRY +# - CSI_PROVISIONER_TAG +# - CSI_SNAPSHOTTER_REGISTRY +# - CSI_SNAPSHOTTER_TAG +# - HOSTPATHPLUGIN_REGISTRY +# - HOSTPATHPLUGIN_TAG +# +# Alternatively, it is possible to override all registries or tags with: +# - IMAGE_REGISTRY +# - IMAGE_TAG +# These are used as fallback when the more specific variables are unset or empty. +# +# Beware that the .yaml files do not have "imagePullPolicy: Always". That means that +# also the "canary" images will only be pulled once. This is good for testing +# (starting a pod multiple times will always run with the same canary image), but +# implies that refreshing that image has to be done manually. +# +# As a special case, 'none' as registry removes the registry name. + +# The default is to use the RBAC rules that match the image that is +# being used, also in the case that the image gets overridden. This +# way if there are breaking changes in the RBAC rules, the deployment +# will continue to work. +# +# However, such breaking changes should be rare and only occur when updating +# to a new major version of a sidecar. Nonetheless, to allow testing the scenario +# where the image gets overridden but not the RBAC rules, updating the RBAC +# rules can be disabled. 
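+# As a purely illustrative example (a hypothetical invocation, not something
+# this script runs itself): the override variables above make it possible to
+# test a single "canary" sidecar image while keeping the RBAC rule selection
+# untouched, e.g.
+#
+#   CSI_PROVISIONER_TAG=canary UPDATE_RBAC_RULES=false ./deploy-hostpath.sh
+#
+# Note that while the RBAC URLs below are temporarily pinned to fixed
+# revisions, UPDATE_RBAC_RULES has little practical effect; it becomes
+# relevant again once the dynamic, version-matched URLs are restored.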
+: ${UPDATE_RBAC_RULES:=true} +function rbac_version () { + yaml="$1" + image="$2" + update_rbac="$3" + + # get version from `image: quay.io/k8scsi/csi-attacher:v1.0.1`, ignoring comments + version="$(sed -e 's/ *#.*$//' "$yaml" | grep "image:.*$image" | sed -e 's/ *#.*//' -e 's/.*://')" + + if $update_rbac; then + # apply overrides + varname=$(echo $image | tr - _ | tr a-z A-Z) + eval version=\${${varname}_TAG:-\${IMAGE_TAG:-\$version}} + fi + + # When using canary images, we have to assume that the + # canary images were built from the corresponding branch. + case "$version" in canary) version=master;; + *-canary) version="$(echo "$version" | sed -e 's/\(.*\)-canary/release-\1/')";; + esac + + echo "$version" +} + +# In addition, the RBAC rules can be overridden separately. +# +#CSI_PROVISIONER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-provisioner.yaml" csi-provisioner false)/deploy/kubernetes/rbac.yaml" +#: ${CSI_PROVISIONER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-provisioner.yaml" csi-provisioner "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} +#CSI_ATTACHER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-attacher.yaml" csi-attacher false)/deploy/kubernetes/rbac.yaml" +#: ${CSI_ATTACHER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-attacher.yaml" csi-attacher "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} +# TODO: Change back to dynamic path after image is released officially +#CSI_SNAPSHOTTER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/rbac.yaml" +#: ${CSI_SNAPSHOTTER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/rbac.yaml} +# +# Using temporary rbac yaml files +CSI_PROVISIONER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/master/deploy/kubernetes/rbac.yaml" +: ${CSI_PROVISIONER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/master/deploy/kubernetes/rbac.yaml} +CSI_ATTACHER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-attacher/master/deploy/kubernetes/rbac.yaml" +: ${CSI_ATTACHER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-attacher/master/deploy/kubernetes/rbac.yaml} +CSI_SNAPSHOTTER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes/kubernetes/4df841b45e0b9db98de083de8e70d19a157e7bdf/test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml" +: ${CSI_SNAPSHOTTER_RBAC:=https://raw.githubusercontent.com/kubernetes/kubernetes/4df841b45e0b9db98de083de8e70d19a157e7bdf/test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml} + + + +CSI_SNAPSHOTTER_RBAC_YAML="${BASE_DIR}/rbac.yaml" +: ${CSI_SNAPSHOTTER_RBAC:=${BASE_DIR}/rbac.yaml} + +INSTALL_CRD=${INSTALL_CRD:-"false"} + +# Some images are not affected by *_REGISTRY/*_TAG and IMAGE_* variables. +# The default is to update unless explicitly excluded. 
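+# For example, given the exclusion in update_image just below, a hypothetical
+# global override such as
+#
+#   IMAGE_TAG=canary ./deploy-hostpath.sh
+#
+# would rewrite the tag of csi-attacher, csi-provisioner, csi-snapshotter and
+# the other configurable images, but leave the socat image untouched.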
+update_image () { + case "$1" in socat) return 1;; esac +} + +run () { + echo "$@" >&2 + "$@" +} + +# deploy volume snapshot CRDs +echo "deploying volume snapshot CRDs" +kubectl apply -f ${BASE_DIR}/snapshotter/crd + +# rbac rules +echo "applying RBAC rules" +for component in CSI_PROVISIONER CSI_ATTACHER CSI_SNAPSHOTTER; do + eval current="\${${component}_RBAC}" + eval original="\${${component}_RBAC_YAML}" + if [ "$current" != "$original" ]; then + echo "Using non-default RBAC rules for $component. Changes from $original to $current are:" + diff -c <(wget --quiet -O - "$original") <(if [[ "$current" =~ ^http ]]; then wget --quiet -O - "$current"; else cat "$current"; fi) || true + fi + run kubectl apply -f "${current}" +done + +# deploy hostpath plugin and registrar sidecar +echo "deploying hostpath components" +for i in $(ls ${BASE_DIR}/hostpath/*.yaml | sort); do + echo " $i" + modified="$(cat "$i" | while IFS= read -r line; do + nocomments="$(echo "$line" | sed -e 's/ *#.*$//')" + if echo "$nocomments" | grep -q '^[[:space:]]*image:[[:space:]]*'; then + # Split 'image: quay.io/k8scsi/csi-attacher:v1.0.1' + # into image (quay.io/k8scsi/csi-attacher:v1.0.1), + # registry (quay.io/k8scsi), + # name (csi-attacher), + # tag (v1.0.1). + image=$(echo "$nocomments" | sed -e 's;.*image:[[:space:]]*;;') + registry=$(echo "$image" | sed -e 's;\(.*\)/.*;\1;') + name=$(echo "$image" | sed -e 's;.*/\([^:]*\).*;\1;') + tag=$(echo "$image" | sed -e 's;.*:;;') + + # Variables are with underscores and upper case. + varname=$(echo $name | tr - _ | tr a-z A-Z) + + # Now replace registry and/or tag, if set as env variables. + # If not set, the replacement is the same as the original value. + # Only do this for the images which are meant to be configurable. + if update_image "$name"; then + prefix=$(eval echo \${${varname}_REGISTRY:-${IMAGE_REGISTRY:-${registry}}}/ | sed -e 's;none/;;') + suffix=$(eval echo :\${${varname}_TAG:-${IMAGE_TAG:-${tag}}}) + line="$(echo "$nocomments" | sed -e "s;$image;${prefix}${name}${suffix};")" + fi + echo " using $line" >&2 + fi + echo "$line" + done)" + if ! echo "$modified" | kubectl apply -f -; then + echo "modified version of $i:" + echo "$modified" + exit 1 + fi +done + +# Wait until all pods are running. We have to make some assumptions +# about the deployment here, otherwise we wouldn't know what to wait +# for: the expectation is that we run attacher, provisioner, +# snapshotter, socat and hostpath plugin in the default namespace. +cnt=0 +while [ $(kubectl get pods 2>/dev/null | grep '^csi-hostpath.* Running ' | wc -l) -lt 5 ] || ! 
kubectl describe volumesnapshotclasses.snapshot.storage.k8s.io 2>/dev/null >/dev/null; do + if [ $cnt -gt 30 ]; then + echo "Running pods:" + kubectl describe pods + + echo >&2 "ERROR: hostpath deployment not ready after over 5min" + exit 1 + fi + echo $(date +%H:%M:%S) "waiting for hostpath deployment to complete, attempt #$cnt" + cnt=$(($cnt + 1)) + sleep 10 +done + + +# deploy snapshotclass +echo "deploying snapshotclass" +kubectl apply -f ${BASE_DIR}/snapshotter/csi-hostpath-snapshotclass.yaml diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml new file mode 100644 index 000000000..d7c36d2ec --- /dev/null +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml @@ -0,0 +1,55 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-attacher + labels: + app: csi-hostpath-attacher +spec: + selector: + app: csi-hostpath-attacher + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-attacher +spec: + serviceName: "csi-hostpath-attacher" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-attacher + template: + metadata: + labels: + app: csi-hostpath-attacher + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccountName: csi-attacher + containers: + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v1.2.0 + args: + - --v=5 + - --csi-address=/csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml new file mode 100644 index 000000000..29324d793 --- /dev/null +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml @@ -0,0 +1,138 @@ +# Service defined here, plus serviceName below in StatefulSet, +# are needed only because of condition explained in +# https://github.com/kubernetes/kubernetes/issues/69608 + +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpathplugin + labels: + app: csi-hostpathplugin +spec: + selector: + app: csi-hostpathplugin + ports: + - name: dummy + port: 12345 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpathplugin +spec: + serviceName: "csi-hostpathplugin" + # One replica only: + # Host path driver only works when everything runs + # on a single node. 
We achieve that by starting it once and then + # co-locate all other pods via inter-pod affinity + replicas: 1 + selector: + matchLabels: + app: csi-hostpathplugin + template: + metadata: + labels: + app: csi-hostpathplugin + spec: + hostNetwork: true + containers: + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi-hostpath /registration/csi-hostpath-reg.sock"] + args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock + securityContext: + privileged: true + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - mountPath: /csi-data-dir + name: csi-data-dir + + - name: hostpath + image: quay.io/k8scsi/hostpathplugin:v1.1.0 + args: + - "--drivername=hostpath.csi.k8s.io" + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + ports: + - containerPort: 9898 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /var/lib/kubelet/plugins + mountPropagation: Bidirectional + name: plugins-dir + - mountPath: /csi-data-dir + name: csi-data-dir + + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: quay.io/k8scsi/livenessprobe:v1.1.0 + args: + - --csi-address=/csi/csi.sock + - --connection-timeout=3s + - --health-port=9898 + + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins + type: Directory + name: plugins-dir + - hostPath: + # 'path' is where PV data is persisted on host. 
+ # using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot + path: /var/lib/csi-hostpath-data/ + type: DirectoryOrCreate + name: csi-data-dir diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml new file mode 100644 index 000000000..74614b517 --- /dev/null +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml @@ -0,0 +1,55 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-provisioner + labels: + app: csi-hostpath-provisioner +spec: + selector: + app: csi-hostpath-provisioner + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-provisioner +spec: + serviceName: "csi-hostpath-provisioner" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-provisioner + template: + metadata: + labels: + app: csi-hostpath-provisioner + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccountName: csi-provisioner + containers: + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:testsnapshotbeta #v1.4.0-rc1 + args: + - -v=5 + - --csi-address=/csi/csi.sock + - --connection-timeout=15s + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml new file mode 100644 index 000000000..9dafc837d --- /dev/null +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml @@ -0,0 +1,57 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-snapshotter + labels: + app: csi-hostpath-snapshotter +spec: + selector: + app: csi-hostpath-snapshotter + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-snapshotter +spec: + serviceName: "csi-hostpath-snapshotter" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-snapshotter + template: + metadata: + labels: + app: csi-hostpath-snapshotter + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccount: csi-snapshotter + containers: + - name: csi-snapshotter + # TODO: change to official image when released + image: quay.io/k8scsi/csi-snapshotter:testsnapshotbeta + imagePullPolicy: IfNotPresent + args: + - -v=5 + - --csi-address=/csi/csi.sock + - --connection-timeout=15s + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-testing.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-testing.yaml new file mode 100644 index 000000000..3e6b837d1 --- /dev/null +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-testing.yaml @@ -0,0 +1,59 @@ +# WARNING: this is only for testing purposes. Do not install in a production +# cluster. +# +# This exposes the hostpath's Unix domain csi.sock as a TCP port to the +# outside world. 
The mapping from Unix domain socket to TCP is done +# by socat. +# +# This is useful for testing with csi-sanity or csc. + +apiVersion: v1 +kind: Service +metadata: + name: hostpath-service +spec: + type: NodePort + selector: + app: csi-hostpath-socat + ports: + - port: 10000 # fixed port inside the pod, dynamically allocated port outside +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-socat +spec: + serviceName: "csi-hostpath-socat" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-socat + template: + metadata: + labels: + app: csi-hostpath-socat + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + containers: + - name: socat + image: alpine/socat:1.0.3 + args: + - tcp-listen:10000,fork,reuseaddr + - unix-connect:/csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/kubernetes-1.17/rbac-snapshotter.yaml b/deploy/kubernetes-1.17/rbac-snapshotter.yaml new file mode 100644 index 000000000..0ff547f96 --- /dev/null +++ b/deploy/kubernetes-1.17/rbac-snapshotter.yaml @@ -0,0 +1,98 @@ +# Together with the RBAC file for external-provisioner, this YAML file +# contains all RBAC objects that are necessary to run external CSI +# snapshotter. +# +# In production, each CSI driver deployment has to be customized: +# - to avoid conflicts, use non-default namespace and different names +# for non-namespaced entities like the ClusterRole +# - optionally rename the non-namespaced ClusterRole if there +# are conflicts with other deployments + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshotter + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # rename if there are conflicts + name: external-snapshotter-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-role +subjects: + - kind: ServiceAccount + name: csi-snapshotter + # replace with non-default namespace name + namespace: default +roleRef: + kind: ClusterRole + # change the name also here if the 
ClusterRole gets renamed + name: external-snapshotter-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: default # TODO: replace with the namespace you want for your sidecar + name: external-snapshotter-leaderelection +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-snapshotter-leaderelection + namespace: default # TODO: replace with the namespace you want for your sidecar +subjects: + - kind: ServiceAccount + name: csi-snapshotter + namespace: default # TODO: replace with the namespace you want for your sidecar +roleRef: + kind: Role + name: external-snapshotter-leaderelection + apiGroup: rbac.authorization.k8s.io + diff --git a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml new file mode 100644 index 000000000..dacb532fd --- /dev/null +++ b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + scope: Cluster + validation: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + deletionPolicy: + allOf: + - enum: + - Delete + - Retain + - enum: + - Delete + - Retain + description: deletionPolicy determines whether a VolumeSnapshotContent created + through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot + is deleted. Supported values are "Retain" and "Delete". "Retain" means + that the VolumeSnapshotContent and its physical snapshot on underlying + storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + type: object + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml new file mode 100644 index 000000000..60582479d --- /dev/null +++ b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml @@ -0,0 +1,193 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + type: object + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + allOf: + - enum: + - Delete + - Retain + - enum: + - Delete + - Retain + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. "Delete" + means that the VolumeSnapshotContent and its physical snapshot on + underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For + pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be the + same as the name returned by the CSI GetPluginName() call for that + driver. Required. 
+ type: string + snapshotClassName: + description: name of the SnapshotClass to which this snapshot belongs. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI name of a pre-existing + snapshot on the underlying storage system. This field is immutable + once specified. + type: string + volumeHandle: + description: volumeHandle specifies the CSI name of the volume from + which a snapshot should be dynamically taken from. This field + is immutable once specified. + type: string + type: object + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to + which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be provided + for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. This timestamp is returned + by the CSI driver after the snapshot is cut. The format of this field + is a Unix nanoseconds time encoded as an int64. On Unix, the command + `date +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. 
+ properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the value returned from CSI "CreateSnapshotRequest" + gRPC call. For pre-existing snapshot, this field will be updated with + the value returned from CSI "ListSnapshots" gRPC call if the corresponding + driver supports. If not specified, it means the readiness of a snapshot + is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. When restoring a volume from this snapshot, the size of + the volume MUST NOT be smaller than the restoreSize if it is specified. + Otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI name of a snapshot on the underlying + storage system. If not specified, it indicates that dynamic snapshot + creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml new file mode 100644 index 000000000..2a77746c0 --- /dev/null +++ b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml @@ -0,0 +1,146 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for taking a point-in-time snapshot + of a PersistentVolumeClaim. Upon successful creation of a snapshot by the + underlying storage system, it is bound to a corresponding VolumeSnapshotContent. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + type: object + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. This + field is immutable once specified. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing + VolumeSnapshotContent object. This field is immutable once specified. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is no + default snapshot class, dynamic snapshot creation will fail. Empty + string is not allowed for this field. TODO(xiangqian): a webhook validation + on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. NOTE: + status can be modified by sources other than system controllers, and must + not be depended upon for accuracy. Controllers should only use information + from the VolumeSnapshotContent object after verifying that the binding + is accurate and complete.' + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot object + has not been successfully bound to a VolumeSnapshotContent object + yet. NOTE: Specified boundVolumeSnapshotContentName alone does not + mean binding is valid. Controllers MUST always verify bidirectional + binding between VolumeSnapshot and VolumeSnapshotContent to + avoid possible security issues.' + type: string + creationTime: + description: creationTime, if not nil, represents the timestamp when + the point-in-time snapshot was successfully cut on the underlying + storage system. In dynamic snapshot creation case, it will be filled + in upon snapshot creation. For a pre-existing snapshot, it will be + filled in once the VolumeSnapshot object has been successfully bound + to a VolumeSnapshotContent object and the underlying storage system + has the information available. If not specified, it indicates that + the creation time of the snapshot is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, readyToUse + will be set to true after underlying storage system has successfully + finished all out-of-bound procedures to make a snapshot ready to be + used to restore a volume. For a pre-existing snapshot, readyToUse + will be set to the value returned from CSI "ListSnapshots" gRPC call + if the matching CSI driver exists and supports. Otherwise, this field + will be set to "True". If not specified, it indicates that the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. The purpose of this field is to give user guidance on how + much space is needed to restore a volume from this snapshot. When + restoring a volume from a snapshot, the size of the volume MUST NOT + be less than the restoreSize. Otherwise the restoration will fail. + If this field is not specified, it indicates that underlying storage + system does not have the information available. + type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/kubernetes-1.17/snapshotter/csi-hostpath-snapshotclass.yaml b/deploy/kubernetes-1.17/snapshotter/csi-hostpath-snapshotclass.yaml new file mode 100644 index 000000000..892dfd0c8 --- /dev/null +++ b/deploy/kubernetes-1.17/snapshotter/csi-hostpath-snapshotclass.yaml @@ -0,0 +1,6 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: csi-hostpath-snapclass +driver: hostpath.csi.k8s.io #csi-hostpath +deletionPolicy: Delete From 80bba1fe208d1cc4e94a39eead4605771940dd3e Mon Sep 17 00:00:00 2001 From: Sunny Date: Sat, 30 Nov 2019 00:29:00 +0530 Subject: [PATCH 05/10] Use kind v0.6.0 kind v0.6.0 appends the kubeconfig with the default config at ~/.kube/config. --- prow.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/prow.sh b/prow.sh index d719e9808..b88230629 100755 --- a/prow.sh +++ b/prow.sh @@ -107,8 +107,7 @@ configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version # kind version to use. If the pre-installed version is different, # the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases/download/ # (if available), otherwise it is built from source. -# TODO: https://github.com/kubernetes-csi/csi-release-tools/issues/39 -configvar CSI_PROW_KIND_VERSION "86bc23d84ac12dcb56a0528890736e2c347c2dc3" "kind" +configvar CSI_PROW_KIND_VERSION "v0.6.0" "kind" # ginkgo test runner version to use. If the pre-installed version is # different, the desired version is built from source. @@ -579,8 +578,7 @@ EOF die "Cluster creation failed again, giving up. See the 'kind-cluster' artifact directory for additional logs." 
fi fi - KUBECONFIG="$(kind get kubeconfig-path --name=csi-prow)" - export KUBECONFIG + export KUBECONFIG="${HOME}/.kube/config" } # Deletes kind cluster inside a prow job From 9a7a685ee169d669a0182532315aeecd8a8715dc Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Mon, 2 Dec 2019 17:45:57 -0800 Subject: [PATCH 06/10] Create a kind cluster with two worker nodes so that the topology feature can be tested. Test cases that test accessing volumes from multiple nodes need to be skipped --- prow.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/prow.sh b/prow.sh index b88230629..815bc9d8a 100755 --- a/prow.sh +++ b/prow.sh @@ -330,7 +330,11 @@ configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0-rc4' "external-snapshotter version tag # whether they can run with the current cluster provider, but until # they are, we filter them out by name. Like the other test selection # variables, this is again a space separated list of regular expressions. -configvar CSI_PROW_E2E_SKIP 'Disruptive' "tests that need to be skipped" +# +# "different node" test skips can be removed once +# https://github.com/kubernetes/kubernetes/pull/82678 has been backported +# to all the K8s versions we test against +configvar CSI_PROW_E2E_SKIP 'Disruptive|different\s+node' "tests that need to be skipped" # This is the directory for additional result files. Usually set by Prow, but # if not (for example, when invoking manually) it defaults to the work directory. @@ -526,6 +530,7 @@ apiVersion: kind.sigs.k8s.io/v1alpha3 nodes: - role: control-plane - role: worker +- role: worker EOF # kubeadm has API dependencies between apiVersion and Kubernetes version @@ -840,10 +845,6 @@ run_e2e () ( install_e2e || die "building e2e.test failed" install_ginkgo || die "installing ginkgo failed" - # TODO (?): multi-node cluster (depends on https://github.com/kubernetes-csi/csi-driver-host-path/pull/14). - # When running on a multi-node cluster, we need to figure out where the - # hostpath driver was deployed and set ClientNodeName accordingly. - generate_test_driver >"${CSI_PROW_WORK}/test-driver.yaml" || die "generating test-driver.yaml failed" # Rename, merge and filter JUnit files. 
Necessary in case that we run the E2E suite again From 4ad69492c97834b52d74d0b67b6f47b0590e4be1 Mon Sep 17 00:00:00 2001 From: Grant Griffiths Date: Tue, 3 Dec 2019 23:48:29 -0800 Subject: [PATCH 07/10] Improve snapshot pod running checks and improve version_gt Signed-off-by: Grant Griffiths --- prow.sh | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/prow.sh b/prow.sh index d719e9808..27c705129 100755 --- a/prow.sh +++ b/prow.sh @@ -713,10 +713,11 @@ install_snapshot_controller() { kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml" cnt=0 - until kubectl get statefulset snapshot-controller | grep snapshot-controller | grep "1/1"; do + expected_running_pods=$(curl https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${CSI_SNAPSHOTTER_VERSION}"/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | grep replicas | cut -d ':' -f 2-) + while [ "$(kubectl get pods -l app=snapshot-controller | grep 'Running' -c)" -lt "$expected_running_pods" ]; do if [ $cnt -gt 30 ]; then - echo "Running statefulsets:" - kubectl describe statefulsets + echo "snapshot-controller pod status:" + kubectl describe pods -l app=snapshot-controller echo >&2 "ERROR: snapshot controller not ready after over 5 min" exit 1 fi @@ -996,8 +997,30 @@ make_test_to_junit () { fi } +# version_gt returns true if arg1 is greater than arg2. +# +# This function expects versions to be one of the following formats: +# X.Y.Z, release-X.Y.Z, vX.Y.Z +# +# where X,Y, and Z are any number. +# +# Partial versions (1.2, release-1.2) work as well. +# The follow substrings are stripped before version comparison: +# - "v" +# - "release-" +# +# Usage: +# version_gt release-1.3 v1.2.0 (returns true) +# version_gt v1.1.1 v1.2.0 (returns false) +# version_gt 1.1.1 v1.2.0 (returns false) +# version_gt 1.3.1 v1.2.0 (returns true) +# version_gt 1.1.1 release-1.2.0 (returns false) +# version_gt 1.2.0 1.2.2 (returns false) function version_gt() { - test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; + versions=$(for ver in "$@"; do ver=${ver#release-}; echo "${ver#v}"; done) + greaterVersion=${1#"release-"}; + greaterVersion=${greaterVersion#"v"}; + test "$(printf '%s' "$versions" | sort -V | head -n 1)" != "$greaterVersion" } main () { From 7fedb9d1b91fcb4cd579fd783d5fd5a0c5bd8fb5 Mon Sep 17 00:00:00 2001 From: Grant Griffiths Date: Wed, 4 Dec 2019 11:40:14 -0800 Subject: [PATCH 08/10] Split snapshotter changes and beta images update for k8s 1.17 Signed-off-by: Grant Griffiths --- deploy/kubernetes-1.17/README.md | 9 +- .../csi-hostpath-snapshotclass.yaml | 6 + deploy/kubernetes-1.17/deploy-hostpath.sh | 189 +---------------- .../hostpath/csi-hostpath-attacher.yaml | 2 +- .../hostpath/csi-hostpath-driverinfo.yaml | 12 ++ .../hostpath/csi-hostpath-plugin.yaml | 11 +- .../hostpath/csi-hostpath-provisioner.yaml | 3 +- .../hostpath/csi-hostpath-resizer.yaml | 54 +++++ .../hostpath/csi-hostpath-snapshotter.yaml | 3 +- deploy/kubernetes-1.17/rbac-snapshotter.yaml | 98 --------- ....storage.k8s.io_volumesnapshotclasses.yaml | 74 ------- ...storage.k8s.io_volumesnapshotcontents.yaml | 193 ------------------ ...apshot.storage.k8s.io_volumesnapshots.yaml | 146 ------------- deploy/kubernetes-latest | 2 +- deploy/util/deploy-hostpath.sh | 42 +++- 15 files changed, 125 insertions(+), 719 deletions(-) create mode 100644 
deploy/kubernetes-1.17/csi-hostpath-snapshotclass.yaml mode change 100755 => 120000 deploy/kubernetes-1.17/deploy-hostpath.sh create mode 100644 deploy/kubernetes-1.17/hostpath/csi-hostpath-driverinfo.yaml create mode 100644 deploy/kubernetes-1.17/hostpath/csi-hostpath-resizer.yaml delete mode 100644 deploy/kubernetes-1.17/rbac-snapshotter.yaml delete mode 100644 deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml delete mode 100644 deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml delete mode 100644 deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml diff --git a/deploy/kubernetes-1.17/README.md b/deploy/kubernetes-1.17/README.md index c5b4c0f20..d7178167d 100644 --- a/deploy/kubernetes-1.17/README.md +++ b/deploy/kubernetes-1.17/README.md @@ -1,7 +1,2 @@ -The deployment for Kubernetes 1.15 uses CSI 1.0 and thus is -incompatible with Kubernetes < 1.13. - -The sidecars depend on 1.15 API changes for migration and resizing, -and 1.14 API changes for CSIDriver and CSINode. -However the hostpath driver doesn't use those features, so this -deployment can work on older Kubernetes versions. +The deployment for Kubernetes 1.17 uses VolumeSnapshot Beta CRDs and thus is imcompatible +with Kubernetes < 1.17 when the VolumeSnapshot CRDs were Alpha. diff --git a/deploy/kubernetes-1.17/csi-hostpath-snapshotclass.yaml b/deploy/kubernetes-1.17/csi-hostpath-snapshotclass.yaml new file mode 100644 index 000000000..892dfd0c8 --- /dev/null +++ b/deploy/kubernetes-1.17/csi-hostpath-snapshotclass.yaml @@ -0,0 +1,6 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: csi-hostpath-snapclass +driver: hostpath.csi.k8s.io #csi-hostpath +deletionPolicy: Delete diff --git a/deploy/kubernetes-1.17/deploy-hostpath.sh b/deploy/kubernetes-1.17/deploy-hostpath.sh deleted file mode 100755 index 0bfdce469..000000000 --- a/deploy/kubernetes-1.17/deploy-hostpath.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/env bash - -# This script captures the steps required to successfully -# deploy the hostpath plugin driver. This should be considered -# authoritative and all updates for this process should be -# done here and referenced elsewhere. - -# The script assumes that kubectl is available on the OS path -# where it is executed. - -set -e -set -o pipefail -set -x - -BASE_DIR=$(dirname "$0") - -# If set, the following env variables override image registry and/or tag for each of the images. -# They are named after the image name, with hyphen replaced by underscore and in upper case. -# -# - CSI_ATTACHER_REGISTRY -# - CSI_ATTACHER_TAG -# - CSI_NODE_DRIVER_REGISTRAR_REGISTRY -# - CSI_NODE_DRIVER_REGISTRAR_TAG -# - CSI_PROVISIONER_REGISTRY -# - CSI_PROVISIONER_TAG -# - CSI_SNAPSHOTTER_REGISTRY -# - CSI_SNAPSHOTTER_TAG -# - HOSTPATHPLUGIN_REGISTRY -# - HOSTPATHPLUGIN_TAG -# -# Alternatively, it is possible to override all registries or tags with: -# - IMAGE_REGISTRY -# - IMAGE_TAG -# These are used as fallback when the more specific variables are unset or empty. -# -# Beware that the .yaml files do not have "imagePullPolicy: Always". That means that -# also the "canary" images will only be pulled once. This is good for testing -# (starting a pod multiple times will always run with the same canary image), but -# implies that refreshing that image has to be done manually. -# -# As a special case, 'none' as registry removes the registry name. 
- -# The default is to use the RBAC rules that match the image that is -# being used, also in the case that the image gets overridden. This -# way if there are breaking changes in the RBAC rules, the deployment -# will continue to work. -# -# However, such breaking changes should be rare and only occur when updating -# to a new major version of a sidecar. Nonetheless, to allow testing the scenario -# where the image gets overridden but not the RBAC rules, updating the RBAC -# rules can be disabled. -: ${UPDATE_RBAC_RULES:=true} -function rbac_version () { - yaml="$1" - image="$2" - update_rbac="$3" - - # get version from `image: quay.io/k8scsi/csi-attacher:v1.0.1`, ignoring comments - version="$(sed -e 's/ *#.*$//' "$yaml" | grep "image:.*$image" | sed -e 's/ *#.*//' -e 's/.*://')" - - if $update_rbac; then - # apply overrides - varname=$(echo $image | tr - _ | tr a-z A-Z) - eval version=\${${varname}_TAG:-\${IMAGE_TAG:-\$version}} - fi - - # When using canary images, we have to assume that the - # canary images were built from the corresponding branch. - case "$version" in canary) version=master;; - *-canary) version="$(echo "$version" | sed -e 's/\(.*\)-canary/release-\1/')";; - esac - - echo "$version" -} - -# In addition, the RBAC rules can be overridden separately. -# -#CSI_PROVISIONER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-provisioner.yaml" csi-provisioner false)/deploy/kubernetes/rbac.yaml" -#: ${CSI_PROVISIONER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-provisioner.yaml" csi-provisioner "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} -#CSI_ATTACHER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-attacher.yaml" csi-attacher false)/deploy/kubernetes/rbac.yaml" -#: ${CSI_ATTACHER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-attacher.yaml" csi-attacher "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} -# TODO: Change back to dynamic path after image is released officially -#CSI_SNAPSHOTTER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/rbac.yaml" -#: ${CSI_SNAPSHOTTER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/rbac.yaml} -# -# Using temporary rbac yaml files -CSI_PROVISIONER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/master/deploy/kubernetes/rbac.yaml" -: ${CSI_PROVISIONER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/master/deploy/kubernetes/rbac.yaml} -CSI_ATTACHER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-attacher/master/deploy/kubernetes/rbac.yaml" -: ${CSI_ATTACHER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-attacher/master/deploy/kubernetes/rbac.yaml} -CSI_SNAPSHOTTER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes/kubernetes/4df841b45e0b9db98de083de8e70d19a157e7bdf/test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml" -: ${CSI_SNAPSHOTTER_RBAC:=https://raw.githubusercontent.com/kubernetes/kubernetes/4df841b45e0b9db98de083de8e70d19a157e7bdf/test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml} - - - -CSI_SNAPSHOTTER_RBAC_YAML="${BASE_DIR}/rbac.yaml" -: 
${CSI_SNAPSHOTTER_RBAC:=${BASE_DIR}/rbac.yaml} - -INSTALL_CRD=${INSTALL_CRD:-"false"} - -# Some images are not affected by *_REGISTRY/*_TAG and IMAGE_* variables. -# The default is to update unless explicitly excluded. -update_image () { - case "$1" in socat) return 1;; esac -} - -run () { - echo "$@" >&2 - "$@" -} - -# deploy volume snapshot CRDs -echo "deploying volume snapshot CRDs" -kubectl apply -f ${BASE_DIR}/snapshotter/crd - -# rbac rules -echo "applying RBAC rules" -for component in CSI_PROVISIONER CSI_ATTACHER CSI_SNAPSHOTTER; do - eval current="\${${component}_RBAC}" - eval original="\${${component}_RBAC_YAML}" - if [ "$current" != "$original" ]; then - echo "Using non-default RBAC rules for $component. Changes from $original to $current are:" - diff -c <(wget --quiet -O - "$original") <(if [[ "$current" =~ ^http ]]; then wget --quiet -O - "$current"; else cat "$current"; fi) || true - fi - run kubectl apply -f "${current}" -done - -# deploy hostpath plugin and registrar sidecar -echo "deploying hostpath components" -for i in $(ls ${BASE_DIR}/hostpath/*.yaml | sort); do - echo " $i" - modified="$(cat "$i" | while IFS= read -r line; do - nocomments="$(echo "$line" | sed -e 's/ *#.*$//')" - if echo "$nocomments" | grep -q '^[[:space:]]*image:[[:space:]]*'; then - # Split 'image: quay.io/k8scsi/csi-attacher:v1.0.1' - # into image (quay.io/k8scsi/csi-attacher:v1.0.1), - # registry (quay.io/k8scsi), - # name (csi-attacher), - # tag (v1.0.1). - image=$(echo "$nocomments" | sed -e 's;.*image:[[:space:]]*;;') - registry=$(echo "$image" | sed -e 's;\(.*\)/.*;\1;') - name=$(echo "$image" | sed -e 's;.*/\([^:]*\).*;\1;') - tag=$(echo "$image" | sed -e 's;.*:;;') - - # Variables are with underscores and upper case. - varname=$(echo $name | tr - _ | tr a-z A-Z) - - # Now replace registry and/or tag, if set as env variables. - # If not set, the replacement is the same as the original value. - # Only do this for the images which are meant to be configurable. - if update_image "$name"; then - prefix=$(eval echo \${${varname}_REGISTRY:-${IMAGE_REGISTRY:-${registry}}}/ | sed -e 's;none/;;') - suffix=$(eval echo :\${${varname}_TAG:-${IMAGE_TAG:-${tag}}}) - line="$(echo "$nocomments" | sed -e "s;$image;${prefix}${name}${suffix};")" - fi - echo " using $line" >&2 - fi - echo "$line" - done)" - if ! echo "$modified" | kubectl apply -f -; then - echo "modified version of $i:" - echo "$modified" - exit 1 - fi -done - -# Wait until all pods are running. We have to make some assumptions -# about the deployment here, otherwise we wouldn't know what to wait -# for: the expectation is that we run attacher, provisioner, -# snapshotter, socat and hostpath plugin in the default namespace. -cnt=0 -while [ $(kubectl get pods 2>/dev/null | grep '^csi-hostpath.* Running ' | wc -l) -lt 5 ] || ! 
kubectl describe volumesnapshotclasses.snapshot.storage.k8s.io 2>/dev/null >/dev/null; do - if [ $cnt -gt 30 ]; then - echo "Running pods:" - kubectl describe pods - - echo >&2 "ERROR: hostpath deployment not ready after over 5min" - exit 1 - fi - echo $(date +%H:%M:%S) "waiting for hostpath deployment to complete, attempt #$cnt" - cnt=$(($cnt + 1)) - sleep 10 -done - - -# deploy snapshotclass -echo "deploying snapshotclass" -kubectl apply -f ${BASE_DIR}/snapshotter/csi-hostpath-snapshotclass.yaml diff --git a/deploy/kubernetes-1.17/deploy-hostpath.sh b/deploy/kubernetes-1.17/deploy-hostpath.sh new file mode 120000 index 000000000..589c43f62 --- /dev/null +++ b/deploy/kubernetes-1.17/deploy-hostpath.sh @@ -0,0 +1 @@ +../util/deploy-hostpath.sh \ No newline at end of file diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml index d7c36d2ec..823763a91 100644 --- a/deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-attacher.yaml @@ -40,7 +40,7 @@ spec: serviceAccountName: csi-attacher containers: - name: csi-attacher - image: quay.io/k8scsi/csi-attacher:v1.2.0 + image: quay.io/k8scsi/csi-attacher:v2.0.0 args: - --v=5 - --csi-address=/csi/csi.sock diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-driverinfo.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-driverinfo.yaml new file mode 100644 index 000000000..47d6486be --- /dev/null +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-driverinfo.yaml @@ -0,0 +1,12 @@ +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: hostpath.csi.k8s.io +spec: + # Supports persistent and ephemeral inline volumes. + volumeLifecycleModes: + - Persistent + - Ephemeral + # To determine at runtime which mode a volume uses, pod info and its + # "csi.storage.k8s.io/ephemeral" entry are needed. 
+ podInfoOnMount: true diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml index 29324d793..2d54b74e8 100644 --- a/deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-plugin.yaml @@ -37,7 +37,7 @@ spec: hostNetwork: true containers: - name: node-driver-registrar - image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 lifecycle: preStop: exec: @@ -63,7 +63,7 @@ spec: name: csi-data-dir - name: hostpath - image: quay.io/k8scsi/hostpathplugin:v1.1.0 + image: quay.io/k8scsi/hostpathplugin:v1.2.0 args: - "--drivername=hostpath.csi.k8s.io" - "--v=5" @@ -102,7 +102,8 @@ spec: name: plugins-dir - mountPath: /csi-data-dir name: csi-data-dir - + - mountPath: /dev + name: dev-dir - name: liveness-probe volumeMounts: - mountPath: /csi @@ -136,3 +137,7 @@ spec: path: /var/lib/csi-hostpath-data/ type: DirectoryOrCreate name: csi-data-dir + - hostPath: + path: /dev + type: Directory + name: dev-dir diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml index 74614b517..6afd1e9ef 100644 --- a/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml @@ -40,11 +40,10 @@ spec: serviceAccountName: csi-provisioner containers: - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:testsnapshotbeta #v1.4.0-rc1 + image: quay.io/k8scsi/csi-provisioner:v1.5.0-rc1 args: - -v=5 - --csi-address=/csi/csi.sock - - --connection-timeout=15s volumeMounts: - mountPath: /csi name: socket-dir diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-resizer.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-resizer.yaml new file mode 100644 index 000000000..77af2afb0 --- /dev/null +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-resizer.yaml @@ -0,0 +1,54 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-resizer + labels: + app: csi-hostpath-resizer +spec: + selector: + app: csi-hostpath-resizer + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-resizer +spec: + serviceName: "csi-hostpath-resizer" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-resizer + template: + metadata: + labels: + app: csi-hostpath-resizer + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccountName: csi-resizer + containers: + - name: csi-resizer + image: quay.io/k8scsi/csi-resizer:v0.3.0 + args: + - -v=5 + - -csi-address=/csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml index 9dafc837d..131dc9cb9 100644 --- a/deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-snapshotter.yaml @@ -41,12 +41,11 @@ spec: containers: - name: csi-snapshotter # TODO: change to official image when released - image: quay.io/k8scsi/csi-snapshotter:testsnapshotbeta + image: quay.io/k8scsi/csi-snapshotter:v2.0.0-rc2 
imagePullPolicy: IfNotPresent args: - -v=5 - --csi-address=/csi/csi.sock - - --connection-timeout=15s volumeMounts: - mountPath: /csi name: socket-dir diff --git a/deploy/kubernetes-1.17/rbac-snapshotter.yaml b/deploy/kubernetes-1.17/rbac-snapshotter.yaml deleted file mode 100644 index 0ff547f96..000000000 --- a/deploy/kubernetes-1.17/rbac-snapshotter.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Together with the RBAC file for external-provisioner, this YAML file -# contains all RBAC objects that are necessary to run external CSI -# snapshotter. -# -# In production, each CSI driver deployment has to be customized: -# - to avoid conflicts, use non-default namespace and different names -# for non-namespaced entities like the ClusterRole -# - optionally rename the non-namespaced ClusterRole if there -# are conflicts with other deployments - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi-snapshotter - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - # rename if there are conflicts - name: external-snapshotter-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents/status"] - verbs: ["update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots/status"] - verbs: ["update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "list", "watch", "delete", "get", "update"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-snapshotter-role -subjects: - - kind: ServiceAccount - name: csi-snapshotter - # replace with non-default namespace name - namespace: default -roleRef: - kind: ClusterRole - # change the name also here if the ClusterRole gets renamed - name: external-snapshotter-runner - apiGroup: rbac.authorization.k8s.io - ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: default # TODO: replace with the namespace you want for your sidecar - name: external-snapshotter-leaderelection -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-snapshotter-leaderelection - namespace: default # TODO: replace with the namespace you want for your sidecar -subjects: - - kind: ServiceAccount - name: csi-snapshotter - namespace: default # TODO: replace with the namespace you want for your sidecar -roleRef: - kind: Role - name: external-snapshotter-leaderelection - apiGroup: rbac.authorization.k8s.io - diff --git 
a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml deleted file mode 100644 index dacb532fd..000000000 --- a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml +++ /dev/null @@ -1,74 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - name: volumesnapshotclasses.snapshot.storage.k8s.io -spec: - group: snapshot.storage.k8s.io - names: - kind: VolumeSnapshotClass - listKind: VolumeSnapshotClassList - plural: volumesnapshotclasses - singular: volumesnapshotclass - scope: Cluster - validation: - openAPIV3Schema: - description: VolumeSnapshotClass specifies parameters that a underlying storage - system uses when creating a volume snapshot. A specific VolumeSnapshotClass - is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses - are non-namespaced - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - deletionPolicy: - allOf: - - enum: - - Delete - - Retain - - enum: - - Delete - - Retain - description: deletionPolicy determines whether a VolumeSnapshotContent created - through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot - is deleted. Supported values are "Retain" and "Delete". "Retain" means - that the VolumeSnapshotContent and its physical snapshot on underlying - storage system are kept. "Delete" means that the VolumeSnapshotContent - and its physical snapshot on underlying storage system are deleted. Required. - type: string - driver: - description: driver is the name of the storage driver that handles this - VolumeSnapshotClass. Required. - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - type: object - parameters: - additionalProperties: - type: string - description: parameters is a key-value map with storage driver specific - parameters for creating snapshots. These values are opaque to Kubernetes. 
- type: object - required: - - deletionPolicy - - driver - type: object - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml deleted file mode 100644 index 60582479d..000000000 --- a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml +++ /dev/null @@ -1,193 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - name: volumesnapshotcontents.snapshot.storage.k8s.io -spec: - group: snapshot.storage.k8s.io - names: - kind: VolumeSnapshotContent - listKind: VolumeSnapshotContentList - plural: volumesnapshotcontents - singular: volumesnapshotcontent - scope: Cluster - subresources: - status: {} - validation: - openAPIV3Schema: - description: VolumeSnapshotContent represents the actual "on-disk" snapshot - object in the underlying storage system - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - type: object - spec: - description: spec defines properties of a VolumeSnapshotContent created - by the underlying storage system. Required. - properties: - deletionPolicy: - allOf: - - enum: - - Delete - - Retain - - enum: - - Delete - - Retain - description: deletionPolicy determines whether this VolumeSnapshotContent - and its physical snapshot on the underlying storage system should - be deleted when its bound VolumeSnapshot is deleted. Supported values - are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent - and its physical snapshot on underlying storage system are kept. "Delete" - means that the VolumeSnapshotContent and its physical snapshot on - underlying storage system are deleted. In dynamic snapshot creation - case, this field will be filled in with the "DeletionPolicy" field - defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For - pre-existing snapshots, users MUST specify this field when creating - the VolumeSnapshotContent object. Required. - type: string - driver: - description: driver is the name of the CSI driver used to create the - physical snapshot on the underlying storage system. This MUST be the - same as the name returned by the CSI GetPluginName() call for that - driver. Required. - type: string - snapshotClassName: - description: name of the SnapshotClass to which this snapshot belongs. - type: string - source: - description: source specifies from where a snapshot will be created. - This field is immutable after creation. Required. 
- properties: - snapshotHandle: - description: snapshotHandle specifies the CSI name of a pre-existing - snapshot on the underlying storage system. This field is immutable - once specified. - type: string - volumeHandle: - description: volumeHandle specifies the CSI name of the volume from - which a snapshot should be dynamically taken from. This field - is immutable once specified. - type: string - type: object - volumeSnapshotRef: - description: volumeSnapshotRef specifies the VolumeSnapshot object to - which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName - field must reference to this VolumeSnapshotContent's name for the - bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent - object, name and namespace of the VolumeSnapshot object MUST be provided - for binding to happen. This field is immutable after creation. Required. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of an - entire object, this string should contain a valid JSON/Go field - access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part of an object. - TODO: this design is not final and this field is subject to change - in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference is - made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - deletionPolicy - - driver - - source - - volumeSnapshotRef - type: object - status: - description: status represents the current information of a snapshot. - properties: - creationTime: - description: creationTime is the timestamp when the point-in-time snapshot - is taken by the underlying storage system. This timestamp is returned - by the CSI driver after the snapshot is cut. The format of this field - is a Unix nanoseconds time encoded as an int64. On Unix, the command - `date +%s%N` returns the current time in nanoseconds since 1970-01-01 - 00:00:00 UTC. - format: int64 - type: integer - error: - description: error is the latest observed error during snapshot creation, - if any. - properties: - message: - description: 'message is a string detailing the encountered error - during snapshot creation if specified. NOTE: message may be logged, - and it should not contain sensitive information.' 
- type: string - time: - description: time is the timestamp when the error was encountered. - format: date-time - type: string - type: object - readyToUse: - description: readyToUse indicates if a snapshot is ready to be used - to restore a volume. In dynamic snapshot creation case, this field - will be filled in with the value returned from CSI "CreateSnapshotRequest" - gRPC call. For pre-existing snapshot, this field will be updated with - the value returned from CSI "ListSnapshots" gRPC call if the corresponding - driver supports. If not specified, it means the readiness of a snapshot - is unknown. - type: boolean - restoreSize: - description: restoreSize represents the complete size of the snapshot - in bytes. When restoring a volume from this snapshot, the size of - the volume MUST NOT be smaller than the restoreSize if it is specified. - Otherwise the restoration will fail. If not specified, it indicates - that the size is unknown. - format: int64 - minimum: 0 - type: integer - snapshotHandle: - description: snapshotHandle is the CSI name of a snapshot on the underlying - storage system. If not specified, it indicates that dynamic snapshot - creation has either failed or it is still in progress. - type: string - type: object - required: - - spec - type: object - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml b/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml deleted file mode 100644 index 2a77746c0..000000000 --- a/deploy/kubernetes-1.17/snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml +++ /dev/null @@ -1,146 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - name: volumesnapshots.snapshot.storage.k8s.io -spec: - group: snapshot.storage.k8s.io - names: - kind: VolumeSnapshot - listKind: VolumeSnapshotList - plural: volumesnapshots - singular: volumesnapshot - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: VolumeSnapshot is a user's request for taking a point-in-time snapshot - of a PersistentVolumeClaim. Upon successful creation of a snapshot by the - underlying storage system, it is bound to a corresponding VolumeSnapshotContent. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - type: object - spec: - description: 'spec defines the desired characteristics of a snapshot requested - by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots - Required.' 
- properties: - source: - description: source specifies where a snapshot will be created from. - This field is immutable after creation. Required. - properties: - persistentVolumeClaimName: - description: persistentVolumeClaimName specifies the name of the - PersistentVolumeClaim object in the same namespace as the VolumeSnapshot - object where the snapshot should be dynamically taken from. This - field is immutable once specified. - type: string - volumeSnapshotContentName: - description: volumeSnapshotContentName specifies the name of a pre-existing - VolumeSnapshotContent object. This field is immutable once specified. - type: string - type: object - volumeSnapshotClassName: - description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass - requested by the VolumeSnapshot. If not specified, the default snapshot - class will be used if one exists. If not specified, and there is no - default snapshot class, dynamic snapshot creation will fail. Empty - string is not allowed for this field. TODO(xiangqian): a webhook validation - on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' - type: string - required: - - source - type: object - status: - description: 'status represents the current information of a snapshot. NOTE: - status can be modified by sources other than system controllers, and must - not be depended upon for accuracy. Controllers should only use information - from the VolumeSnapshotContent object after verifying that the binding - is accurate and complete.' - properties: - boundVolumeSnapshotContentName: - description: 'boundVolumeSnapshotContentName represents the name of - the VolumeSnapshotContent object to which the VolumeSnapshot object - is bound. If not specified, it indicates that the VolumeSnapshot object - has not been successfully bound to a VolumeSnapshotContent object - yet. NOTE: Specified boundVolumeSnapshotContentName alone does not - mean binding is valid. Controllers MUST always verify bidirectional - binding between VolumeSnapshot and VolumeSnapshotContent to - avoid possible security issues.' - type: string - creationTime: - description: creationTime, if not nil, represents the timestamp when - the point-in-time snapshot was successfully cut on the underlying - storage system. In dynamic snapshot creation case, it will be filled - in upon snapshot creation. For a pre-existing snapshot, it will be - filled in once the VolumeSnapshot object has been successfully bound - to a VolumeSnapshotContent object and the underlying storage system - has the information available. If not specified, it indicates that - the creation time of the snapshot is unknown. - format: date-time - type: string - error: - description: error is the last observed error during snapshot creation, - if any. This field could be helpful to upper level controllers(i.e., - application controller) to decide whether they should continue on - waiting for the snapshot to be created based on the type of error - reported. - properties: - message: - description: 'message is a string detailing the encountered error - during snapshot creation if specified. NOTE: message may be logged, - and it should not contain sensitive information.' - type: string - time: - description: time is the timestamp when the error was encountered. - format: date-time - type: string - type: object - readyToUse: - description: readyToUse indicates if a snapshot is ready to be used - to restore a volume. 
In dynamic snapshot creation case, readyToUse - will be set to true after underlying storage system has successfully - finished all out-of-bound procedures to make a snapshot ready to be - used to restore a volume. For a pre-existing snapshot, readyToUse - will be set to the value returned from CSI "ListSnapshots" gRPC call - if the matching CSI driver exists and supports. Otherwise, this field - will be set to "True". If not specified, it indicates that the readiness - of a snapshot is unknown. - type: boolean - restoreSize: - description: restoreSize represents the complete size of the snapshot - in bytes. The purpose of this field is to give user guidance on how - much space is needed to restore a volume from this snapshot. When - restoring a volume from a snapshot, the size of the volume MUST NOT - be less than the restoreSize. Otherwise the restoration will fail. - If this field is not specified, it indicates that underlying storage - system does not have the information available. - type: string - type: object - required: - - spec - type: object - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/deploy/kubernetes-latest b/deploy/kubernetes-latest index 5ca56f454..588027544 120000 --- a/deploy/kubernetes-latest +++ b/deploy/kubernetes-latest @@ -1 +1 @@ -kubernetes-1.16 \ No newline at end of file +kubernetes-1.17 \ No newline at end of file diff --git a/deploy/util/deploy-hostpath.sh b/deploy/util/deploy-hostpath.sh index 81e92bd9c..69d790de0 100755 --- a/deploy/util/deploy-hostpath.sh +++ b/deploy/util/deploy-hostpath.sh @@ -82,13 +82,46 @@ function rbac_version () { echo "$version" } +# version_gt returns true if arg1 is greater than arg2. +# +# This function expects versions to be one of the following formats: +# X.Y.Z, release-X.Y.Z, vX.Y.Z +# +# where X,Y, and Z are any number. +# +# Partial versions (1.2, release-1.2) work as well. +# The follow substrings are stripped before version comparison: +# - "v" +# - "release-" +# +# Usage: +# version_gt release-1.3 v1.2.0 (returns true) +# version_gt v1.1.1 v1.2.0 (returns false) +# version_gt 1.1.1 v1.2.0 (returns false) +# version_gt 1.3.1 v1.2.0 (returns true) +# version_gt 1.1.1 release-1.2.0 (returns false) +# version_gt 1.2.0 1.2.2 (returns false) +function version_gt() { + versions=$(for ver in "$@"; do ver=${ver#release-}; echo ${ver#v}; done) + greaterVersion=${1#"release-"}; + greaterVersion=${greaterVersion#"v"}; + test "$(printf '%s' "$versions" | sort -V | head -n 1)" != "$greaterVersion" +} + # In addition, the RBAC rules can be overridden separately. +# For snapshotter 2.0+, the directory has changed. 
+SNAPSHOTTER_RBAC_RELATIVE_PATH="rbac.yaml" +if version_gt $(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-snapshotter.yaml" csi-snapshotter "${UPDATE_RBAC_RULES}") "v1.255.255"; then + SNAPSHOTTER_RBAC_RELATIVE_PATH="csi-snapshotter/rbac-csi-snapshotter.yaml" +fi +echo "SNAPSHOTTER_RBAC_RELATIVE_PATH $SNAPSHOTTER_RBAC_RELATIVE_PATH" + CSI_PROVISIONER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-provisioner.yaml" csi-provisioner false)/deploy/kubernetes/rbac.yaml" : ${CSI_PROVISIONER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-provisioner.yaml" csi-provisioner "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} CSI_ATTACHER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-attacher.yaml" csi-attacher false)/deploy/kubernetes/rbac.yaml" : ${CSI_ATTACHER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-attacher.yaml" csi-attacher "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} -CSI_SNAPSHOTTER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-snapshotter.yaml" csi-snapshotter false)/deploy/kubernetes/rbac.yaml" -: ${CSI_SNAPSHOTTER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-snapshotter.yaml" csi-snapshotter "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} +CSI_SNAPSHOTTER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-snapshotter.yaml" csi-snapshotter false)/deploy/kubernetes/${SNAPSHOTTER_RBAC_RELATIVE_PATH}" +: ${CSI_SNAPSHOTTER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-snapshotter.yaml" csi-snapshotter "${UPDATE_RBAC_RULES}")/deploy/kubernetes/${SNAPSHOTTER_RBAC_RELATIVE_PATH} CSI_RESIZER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-resizer/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-resizer.yaml" csi-resizer false)/deploy/kubernetes/rbac.yaml" : ${CSI_RESIZER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-resizer/$(rbac_version "${BASE_DIR}/hostpath/csi-hostpath-resizer.yaml" csi-resizer "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} @@ -146,7 +179,7 @@ for i in $(ls ${BASE_DIR}/hostpath/*.yaml | sort); do [ -f ${BASE_DIR}/canary-blacklist.txt ] && grep -q "^$name\$" ${BASE_DIR}/canary-blacklist.txt; then # Ignore IMAGE_TAG=canary for this particular image because its - # canary image is blacklisted in the deployment's blacklist. + # canary image is blacklisted in the deployment blacklist. 
suffix=$(eval echo :\${${varname}_TAG:-${tag}}) else suffix=$(eval echo :\${${varname}_TAG:-${IMAGE_TAG:-${tag}}}) @@ -186,4 +219,5 @@ done # deploy snapshotclass echo "deploying snapshotclass" -kubectl apply -f ${BASE_DIR}/snapshotter/csi-hostpath-snapshotclass.yaml +SNAPSHOTCLASS_PATH="${BASE_DIR}/snapshotter/csi-hostpath-snapshotclass.yaml" +kubectl apply -f $SNAPSHOTCLASS_PATH From a4e629966848d0097461f49f8aab0dfeb68d7a68 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Wed, 4 Dec 2019 14:12:50 -0800 Subject: [PATCH 09/10] fix syntax for ppc64le build --- build.make | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.make b/build.make index 7075a37ee..a9b9d25dc 100644 --- a/build.make +++ b/build.make @@ -70,7 +70,7 @@ build-%: check-go-version-go CGO_ENABLED=0 GOOS=linux go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$* if [ "$$ARCH" = "amd64" ]; then \ CGO_ENABLED=0 GOOS=windows go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*.exe ./cmd/$* ; \ - CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*-ppc64le ./cmd/$* + CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*-ppc64le ./cmd/$* ; \ fi container-%: build-% From c10d111b0eb7f26bd02a6a28bd4712e56e46e55c Mon Sep 17 00:00:00 2001 From: Grant Griffiths Date: Wed, 4 Dec 2019 18:07:09 -0800 Subject: [PATCH 10/10] Add topology feature gate to provisioner for k8s 1.17 Signed-off-by: Grant Griffiths --- deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml b/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml index 6afd1e9ef..85904dfb1 100644 --- a/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml +++ b/deploy/kubernetes-1.17/hostpath/csi-hostpath-provisioner.yaml @@ -44,6 +44,7 @@ spec: args: - -v=5 - --csi-address=/csi/csi.sock + - --feature-gates=Topology=true volumeMounts: - mountPath: /csi name: socket-dir
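
A few runnable sketches related to the patches above. PATCH 07/10 replaces the statefulset-based wait for the
snapshot controller with a pod count that is compared against the replica count parsed out of the upstream
setup-snapshot-controller.yaml. The fragment below is a stand-alone sketch of that check, not part of the series;
it assumes kubectl already points at the test cluster and that CSI_SNAPSHOTTER_VERSION is set to a valid
external-snapshotter tag.

    # Derive the expected number of snapshot-controller pods from the "replicas:" line
    # of the upstream manifest, then count the pods that currently report Running.
    manifest="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
    expected_running_pods=$(curl -s "$manifest" | grep replicas | cut -d ':' -f 2-)
    actual_running_pods=$(kubectl get pods -l app=snapshot-controller | grep 'Running' -c)
    echo "expected:${expected_running_pods}, running: ${actual_running_pods}"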
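
PATCH 07/10 and PATCH 08/10 add the same version_gt helper to prow.sh and deploy/util/deploy-hostpath.sh, and
PATCH 08/10 uses it with a "v1.255.255" cut-off to decide whether the snapshotter RBAC file is the old rbac.yaml
or the csi-snapshotter/rbac-csi-snapshotter.yaml layout that came with external-snapshotter 2.0. The sketch below
shows how the sort -V comparison behaves; the sample invocations are illustrative only.

    version_gt() {
        # Strip the optional "release-"/"v" prefixes, then let sort -V order the
        # arguments: arg1 counts as "greater" when it is not the smallest entry.
        versions=$(for ver in "$@"; do ver=${ver#release-}; echo "${ver#v}"; done)
        greaterVersion=${1#"release-"}
        greaterVersion=${greaterVersion#"v"}
        test "$(printf '%s' "$versions" | sort -V | head -n 1)" != "$greaterVersion"
    }

    version_gt release-1.3 v1.2.0 && echo "release-1.3 is newer than v1.2.0"
    version_gt 1.2.0 1.2.2        || echo "1.2.0 is not newer than 1.2.2"
    # 2.x sidecar tags clear the v1.255.255 bar while 1.x tags do not, which is what
    # routes csi-snapshotter v2.0.0-rc2 to csi-snapshotter/rbac-csi-snapshotter.yaml.
    version_gt v2.0.0-rc2 v1.255.255 && echo "v2.0.0-rc2 uses the new RBAC path"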
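
PATCH 08/10 also adds deploy/kubernetes-1.17/csi-hostpath-snapshotclass.yaml, a v1beta1 VolumeSnapshotClass named
csi-hostpath-snapclass for the hostpath.csi.k8s.io driver. As a usage sketch that is not part of the series, a
snapshot of an already provisioned claim could be requested like this once the deployment is running; the
VolumeSnapshot name and the PVC name csi-pvc are placeholders.

    # Hypothetical example: take a snapshot through the deployed snapshot class.
    snapshot_yaml='
    apiVersion: snapshot.storage.k8s.io/v1beta1
    kind: VolumeSnapshot
    metadata:
      name: csi-hostpath-snapshot-demo
    spec:
      volumeSnapshotClassName: csi-hostpath-snapclass
      source:
        persistentVolumeClaimName: csi-pvc
    '
    echo "$snapshot_yaml" | kubectl apply -f -
    # readyToUse turns true once the hostpath plugin has cut the snapshot.
    kubectl get volumesnapshot csi-hostpath-snapshot-demo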
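
PATCH 09/10 only appends "; \" to the ppc64le build line. The whole if-block in the build-% recipe is stitched
into a single shell command by the backslash continuations, so without the trailing "; \" the closing fi lands in
a separate shell invocation and the recipe fails. A rough illustration in plain shell, with echo commands standing
in for the go build lines:

    # Without the continuation, the "if" is cut off before its "fi", so both
    # halves are shell syntax errors (roughly what make ran before this fix).
    sh -c 'if [ "$ARCH" = "amd64" ]; then echo build-windows'
    sh -c 'fi'

    # With "; \" joining the lines, make hands the shell one well-formed command.
    ARCH=amd64 sh -c 'if [ "$ARCH" = "amd64" ]; then echo build-windows ; echo build-ppc64le ; fi'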
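
PATCH 10/10 turns on the external-provisioner Topology feature gate for the 1.17 deployment. One quick way to see
the topology information that gate works from, once the driver is deployed, is to look at the topology keys the
hostpath driver registers on each CSINode object; this is only a verification sketch, not part of the series.

    # Show the CSINode entries for the hostpath driver, including topologyKeys.
    kubectl get csinode -o yaml | grep -B 2 -A 3 'hostpath.csi.k8s.io'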