diff --git a/charts/README.md b/charts/README.md
index 30630ae2f73..af775ef6ecb 100644
--- a/charts/README.md
+++ b/charts/README.md
@@ -77,7 +77,7 @@ The following table lists the configurable parameters of the latest SMB CSI Driv
 | `controller.workingMountDir` | working directory for provisioner to mount smb shares temporarily | `/tmp` |
 | `controller.runOnMaster` | run controller on master node | `false` |
 | `controller.runOnControlPlane` | run controller on control plane node | `false` |
-| `controller.resources.csiProvisioner.limits.memory` | csi-provisioner memory limits | `100Mi` |
+| `controller.resources.csiProvisioner.limits.memory` | csi-provisioner memory limits | `400Mi` |
 | `controller.resources.csiProvisioner.requests.cpu` | csi-provisioner cpu requests limits | `10m` |
 | `controller.resources.csiProvisioner.requests.memory` | csi-provisioner memory requests limits | `20Mi` |
 | `controller.resources.livenessProbe.limits.memory` | liveness-probe memory limits | `300Mi` |
@@ -86,7 +86,7 @@ The following table lists the configurable parameters of the latest SMB CSI Driv
 | `controller.resources.smb.limits.memory` | smb-csi-driver memory limits | `200Mi` |
 | `controller.resources.smb.requests.cpu` | smb-csi-driver cpu requests limits | `10m` |
 | `controller.resources.smb.requests.memory` | smb-csi-driver memory requests limits | `20Mi` |
-| `controller.resources.csiResizer.limits.memory` | csi-resizer memory limits | `300Mi` |
+| `controller.resources.csiResizer.limits.memory` | csi-resizer memory limits | `400Mi` |
 | `controller.resources.csiResizer.requests.cpu` | csi-resizer cpu requests limits | `10m` |
 | `controller.resources.csiResizer.requests.memory` | csi-resizer memory requests limits | `20Mi` |
 | `controller.affinity` | controller pod affinity | `{}` |
diff --git a/charts/latest/csi-driver-smb-v0.0.0.tgz b/charts/latest/csi-driver-smb-v0.0.0.tgz
index 890822e1e2a..ccb42500c61 100644
Binary files a/charts/latest/csi-driver-smb-v0.0.0.tgz and b/charts/latest/csi-driver-smb-v0.0.0.tgz differ
diff --git a/charts/latest/csi-driver-smb/templates/csi-smb-controller.yaml b/charts/latest/csi-driver-smb/templates/csi-smb-controller.yaml
index b800df63698..4961f82f1d6 100755
--- a/charts/latest/csi-driver-smb/templates/csi-smb-controller.yaml
+++ b/charts/latest/csi-driver-smb/templates/csi-smb-controller.yaml
@@ -78,6 +78,30 @@ spec:
             capabilities:
               drop:
               - ALL
+        - name: csi-resizer
+{{- if hasPrefix "/" .Values.image.csiResizer.repository }}
+          image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}"
+{{- else }}
+          image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}"
+{{- end }}
+          args:
+            - "-csi-address=$(ADDRESS)"
+            - "-v=2"
+            - "-leader-election"
+            - "--leader-election-namespace={{ .Release.Namespace }}"
+            - "-handle-volume-inuse-error=false"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }}
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+          resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }}
+          securityContext:
+            capabilities:
+              drop:
+              - ALL
         - name: liveness-probe
 {{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
           image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
diff --git a/charts/latest/csi-driver-smb/templates/rbac-csi-smb.yaml b/charts/latest/csi-driver-smb/templates/rbac-csi-smb.yaml
index eec11a4310b..3e13eed752e 100755
--- a/charts/latest/csi-driver-smb/templates/rbac-csi-smb.yaml
+++ b/charts/latest/csi-driver-smb/templates/rbac-csi-smb.yaml
@@ -48,7 +48,6 @@ rules:
     resources: ["secrets"]
     verbs: ["get"]
 ---
-
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -62,4 +61,40 @@ roleRef:
   kind: ClusterRole
   name: {{ .Values.rbac.name }}-external-provisioner-role
   apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Values.rbac.name }}-external-resizer-role
+{{ include "smb.labels" . | indent 2 }}
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Values.rbac.name }}-csi-resizer-role
+{{ include "smb.labels" . | indent 2 }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ .Values.serviceAccount.controller }}
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ .Values.rbac.name }}-external-resizer-role
+  apiGroup: rbac.authorization.k8s.io
 {{ end }}
diff --git a/charts/latest/csi-driver-smb/values.yaml b/charts/latest/csi-driver-smb/values.yaml
index 6e590df3707..aa828a0acb5 100755
--- a/charts/latest/csi-driver-smb/values.yaml
+++ b/charts/latest/csi-driver-smb/values.yaml
@@ -8,6 +8,10 @@ image:
     repository: /csi-provisioner
     tag: v5.1.0
     pullPolicy: IfNotPresent
+  csiResizer:
+    repository: registry.k8s.io/sig-storage/csi-resizer
+    tag: v1.12.0
+    pullPolicy: IfNotPresent
   livenessProbe:
     repository: /livenessprobe
     tag: v2.14.0
@@ -50,7 +54,13 @@ controller:
   resources:
     csiProvisioner:
       limits:
-        memory: 300Mi
+        memory: 400Mi
+      requests:
+        cpu: 10m
+        memory: 20Mi
+    csiResizer:
+      limits:
+        memory: 400Mi
       requests:
         cpu: 10m
         memory: 20Mi
diff --git a/deploy/csi-smb-controller.yaml b/deploy/csi-smb-controller.yaml
index e59a050d165..52632a91404 100644
--- a/deploy/csi-smb-controller.yaml
+++ b/deploy/csi-smb-controller.yaml
@@ -55,7 +55,31 @@ spec:
           resources:
             limits:
               cpu: 1
-              memory: 300Mi
+              memory: 400Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+          securityContext:
+            capabilities:
+              drop:
+              - ALL
+        - name: csi-resizer
+          image: registry.k8s.io/sig-storage/csi-resizer:v1.12.0
+          args:
+            - "-csi-address=$(ADDRESS)"
+            - "-v=2"
+            - "-leader-election"
+            - "--leader-election-namespace=kube-system"
+            - "-handle-volume-inuse-error=false"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+          resources:
+            limits:
+              memory: 400Mi
             requests:
               cpu: 10m
               memory: 20Mi
diff --git a/deploy/rbac-csi-smb.yaml b/deploy/rbac-csi-smb.yaml
index aa131b9e2d2..248a61c7b28 100644
--- a/deploy/rbac-csi-smb.yaml
+++ b/deploy/rbac-csi-smb.yaml
@@ -41,7 +41,6 @@ rules:
     resources: ["secrets"]
     verbs: ["get"]
 ---
-
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -54,3 +53,37 @@ roleRef:
   kind: ClusterRole
   name: smb-external-provisioner-role
   apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: smb-external-resizer-role
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: smb-csi-resizer-role
+subjects:
+  - kind: ServiceAccount
+    name: csi-smb-controller-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: smb-external-resizer-role
+  apiGroup: rbac.authorization.k8s.io
diff --git a/hack/verify-helm-chart.sh b/hack/verify-helm-chart.sh
index fa4b59f40fa..cc61985f924 100755
--- a/hack/verify-helm-chart.sh
+++ b/hack/verify-helm-chart.sh
@@ -62,8 +62,9 @@ pip install yq --break-system-packages --ignore-installed PyYAML
 
 # Extract images from csi-smb-controller.yaml
 expected_csi_provisioner_image="$(cat ${PKG_ROOT}/deploy/csi-smb-controller.yaml | yq -r .spec.template.spec.containers[0].image | head -n 1)"
-expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-smb-controller.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
-expected_smb_image="$(cat ${PKG_ROOT}/deploy/csi-smb-controller.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
+expected_csi_resizer_image="$(cat ${PKG_ROOT}/deploy/csi-smb-controller.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
+expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-smb-controller.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
+expected_smb_image="$(cat ${PKG_ROOT}/deploy/csi-smb-controller.yaml | yq -r .spec.template.spec.containers[3].image | head -n 1)"
 
 csi_provisioner_image="$(get_image_from_helm_chart "csiProvisioner")"
 validate_image "${expected_csi_provisioner_image}" "${csi_provisioner_image}"
diff --git a/pkg/smb/controllerserver.go b/pkg/smb/controllerserver.go
index 30db2cb4ec3..b16d0b88eaa 100644
--- a/pkg/smb/controllerserver.go
+++ b/pkg/smb/controllerserver.go
@@ -308,8 +308,19 @@ func (d *Driver) ListVolumes(_ context.Context, _ *csi.ListVolumesRequest) (*csi
 }
 
 // ControllerExpandVolume expand volume
-func (d *Driver) ControllerExpandVolume(_ context.Context, _ *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
-	return nil, status.Error(codes.Unimplemented, "")
+func (d *Driver) ControllerExpandVolume(_ context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
+	if len(req.GetVolumeId()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
+	}
+
+	if req.GetCapacityRange() == nil {
+		return nil, status.Error(codes.InvalidArgument, "Capacity Range missing in request")
+	}
+
+	volSizeBytes := int64(req.GetCapacityRange().GetRequiredBytes())
+	klog.V(2).Infof("ControllerExpandVolume(%s) succeeded, requested size: %d bytes", req.VolumeId, volSizeBytes)
+
+	return &csi.ControllerExpandVolumeResponse{CapacityBytes: req.GetCapacityRange().GetRequiredBytes()}, nil
 }
 
 func (d *Driver) CreateSnapshot(_ context.Context, _ *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
diff --git a/pkg/smb/controllerserver_test.go b/pkg/smb/controllerserver_test.go
index 3b2893255ea..f11c55853f1 100644
--- a/pkg/smb/controllerserver_test.go
+++ b/pkg/smb/controllerserver_test.go
@@ -398,11 +398,54 @@ func TestListVolumes(t *testing.T) {
 
 func TestControllerExpandVolume(t *testing.T) {
 	d := NewFakeDriver()
-	req := csi.ControllerExpandVolumeRequest{}
-	resp, err := d.ControllerExpandVolume(context.Background(), &req)
-	assert.Nil(t, resp)
-	if !reflect.DeepEqual(err, status.Error(codes.Unimplemented, "")) {
-		t.Errorf("Unexpected error: %v", err)
+
+	testCases := []struct {
+		name     string
+		testFunc func(t *testing.T)
+	}{
+		{
+			name: "volume ID missing",
+			testFunc: func(t *testing.T) {
+				req := &csi.ControllerExpandVolumeRequest{}
+				_, err := d.ControllerExpandVolume(context.Background(), req)
+				expectedErr := status.Error(codes.InvalidArgument, "Volume ID missing in request")
+				if !reflect.DeepEqual(err, expectedErr) {
+					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
+				}
+			},
+		},
+		{
+			name: "Capacity Range missing",
+			testFunc: func(t *testing.T) {
+				req := &csi.ControllerExpandVolumeRequest{
+					VolumeId: "unit-test",
+				}
+				_, err := d.ControllerExpandVolume(context.Background(), req)
+				expectedErr := status.Error(codes.InvalidArgument, "Capacity Range missing in request")
+				if !reflect.DeepEqual(err, expectedErr) {
+					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
+				}
+			},
+		},
+		{
+			name: "Error = nil",
+			testFunc: func(t *testing.T) {
+				req := &csi.ControllerExpandVolumeRequest{
+					VolumeId: "unit-test",
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: 10000,
+					},
+				}
+				_, err := d.ControllerExpandVolume(context.Background(), req)
+				if !reflect.DeepEqual(err, nil) {
+					t.Errorf("actualErr: (%v), expectedErr: (%v)", err, nil)
+				}
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, tc.testFunc)
 	}
 }
 
diff --git a/test/e2e/dynamic_provisioning_test.go b/test/e2e/dynamic_provisioning_test.go
index e37fa6f6cfb..b269cd55cef 100644
--- a/test/e2e/dynamic_provisioning_test.go
+++ b/test/e2e/dynamic_provisioning_test.go
@@ -54,7 +54,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
 	})
 
 	testDriver = driver.InitSMBDriver()
-	ginkgo.It("should create a volume after driver restart [smb.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+	ginkgo.It("should create a volume after driver restart", func(ctx ginkgo.SpecContext) {
 		ginkgo.Skip("test case is disabled since node logs would be lost after driver restart")
 		pod := testsuites.PodDetails{
 			Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' >> /mnt/test-1/data && while true; do sleep 3600; done"),
@@ -102,7 +102,7 @@
 		test.Run(ctx, cs, ns)
 	})
 
-	ginkgo.It("should create a volume on demand with mount options [smb.csi.k8s.io] [Windows]", func(ctx ginkgo.SpecContext) {
+	ginkgo.It("should create a volume on demand with mount options [Windows]", func(ctx ginkgo.SpecContext) {
 		pods := []testsuites.PodDetails{
 			{
 				Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"),
@@ -137,7 +137,7 @@
 		test.Run(ctx, cs, ns)
 	})
 
-	ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [smb.csi.k8s.io] [Windows]", func(ctx ginkgo.SpecContext) {
+	ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [Windows]", func(ctx ginkgo.SpecContext) {
 		pods := []testsuites.PodDetails{
 			{
convertToPowershellCommandIfNecessary("while true; do echo $(date -u) >> /mnt/test-1/data; sleep 100; done"), @@ -178,7 +178,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { }) // Track issue https://github.com/kubernetes/kubernetes/issues/70505 - ginkgo.It("should create a volume on demand and mount it as readOnly in a pod [smb.csi.k8s.io]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should create a volume on demand and mount it as readOnly in a pod", func(ctx ginkgo.SpecContext) { // Windows volume does not support readOnly skipIfTestingInWindowsCluster() pods := []testsuites.PodDetails{ @@ -206,7 +206,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [smb.csi.k8s.io] [Windows]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [Windows]", func(ctx ginkgo.SpecContext) { skipIfTestingInWindowsCluster() pod := testsuites.PodDetails{ Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' >> /mnt/test-1/data && while true; do sleep 100; done"), @@ -246,7 +246,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { }) // Track issue https://github.com/kubernetes-csi/csi-driver-smb/issues/834 - ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy even if it contains read-only subdir %q [smb.csi.k8s.io]", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) { + ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy even if it contains read-only subdir %q", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) { skipIfTestingInWindowsCluster() reclaimPolicy := v1.PersistentVolumeReclaimDelete pod := testsuites.PodDetails{ @@ -286,7 +286,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [smb.csi.k8s.io] [Windows]", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) { + ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [Windows]", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) { reclaimPolicy := v1.PersistentVolumeReclaimDelete volumes := []testsuites.VolumeDetails{ { @@ -302,7 +302,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [smb.csi.k8s.io] [Windows]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) { + ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [Windows]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) { reclaimPolicy := v1.PersistentVolumeReclaimRetain volumes := []testsuites.VolumeDetails{ { @@ -319,7 +319,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It("should create a pod with multiple volumes [smb.csi.k8s.io] [Windows]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should create a pod with multiple volumes [Windows]", func(ctx ginkgo.SpecContext) { volumes := []testsuites.VolumeDetails{} for i := 1; i <= 6; i++ { volume := testsuites.VolumeDetails{ @@ -348,7 +348,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It("should create a pod with volume mount subpath [smb.csi.k8s.io] [Windows]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should create a pod with volume mount subpath 
[Windows]", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"), @@ -373,7 +373,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It("should clone a volume from an existing volume [smb.csi.k8s.io]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should clone a volume from an existing volume", func(ctx ginkgo.SpecContext) { skipIfTestingInWindowsCluster() pod := testsuites.PodDetails{ @@ -409,7 +409,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It("should create a volume on demand with retaining subdir on delete [smb.csi.k8s.io]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should create a volume on demand with retaining subdir on delete", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"), @@ -443,7 +443,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It("should create a volume on demand with archive on archive [smb.csi.k8s.io]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should create a volume on demand with archive on archive", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"), @@ -477,7 +477,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) - ginkgo.It("should create a volume on demand with archive on archive subDir [smb.csi.k8s.io]", func(ctx ginkgo.SpecContext) { + ginkgo.It("should create a volume on demand with archive on archive subDir", func(ctx ginkgo.SpecContext) { pods := []testsuites.PodDetails{ { Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"), @@ -510,4 +510,27 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() { } test.Run(ctx, cs, ns) }) + + ginkgo.It("should create a volume on demand and resize it", func(ctx ginkgo.SpecContext) { + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: "10Gi", + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedResizeVolumeTest{ + CSIDriver: testDriver, + Pods: pods, + StorageClassParameters: archiveSubDirStorageClassParameters, + } + test.Run(ctx, cs, ns) + }) }) diff --git a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go new file mode 100644 index 00000000000..97e2f9b9c0f --- /dev/null +++ b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go @@ -0,0 +1,99 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testsuites
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/kubernetes-csi/csi-driver-smb/test/e2e/driver"
+	"github.com/onsi/ginkgo/v2"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+// DynamicallyProvisionedResizeVolumeTest will provision the required StorageClass(es), PVC(s) and Pod(s),
+// wait for the external resizer to resize the PV,
+// and verify that both the PVC and the PV report the new size.
+type DynamicallyProvisionedResizeVolumeTest struct {
+	CSIDriver              driver.DynamicPVTestDriver
+	Pods                   []PodDetails
+	StorageClassParameters map[string]string
+}
+
+func (t *DynamicallyProvisionedResizeVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
+	for _, pod := range t.Pods {
+		tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
+		// defer must be called here so the resources are not removed before they are used
+		for i := range cleanup {
+			defer cleanup[i](ctx)
+		}
+
+		ginkgo.By("deploying the pod")
+		tpod.Create(ctx)
+		defer tpod.Cleanup(ctx)
+		ginkgo.By("checking that the pod's command exits with no error")
+		tpod.WaitForSuccess(ctx)
+
+		pvcName := tpod.pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName
+		pvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(ctx, pvcName, metav1.GetOptions{})
+		if err != nil {
+			framework.ExpectNoError(err, fmt.Sprintf("failed to get original pvc(%s): %v", pvcName, err))
+		}
+
+		// grow the claim by 1GiB
+		originalSize := pvc.Spec.Resources.Requests["storage"]
+		delta := resource.Quantity{}
+		delta.Set(1024 * 1024 * 1024)
+		originalSize.Add(delta)
+		pvc.Spec.Resources.Requests["storage"] = originalSize
+
+		ginkgo.By("resizing the pvc")
+		updatedPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Update(ctx, pvc, metav1.UpdateOptions{})
+		if err != nil {
+			framework.ExpectNoError(err, fmt.Sprintf("failed to resize pvc(%s): %v", pvcName, err))
+		}
+		updatedSize := updatedPvc.Spec.Resources.Requests["storage"]
+
+		ginkgo.By("sleeping 30s to wait for the resize to complete")
+		time.Sleep(30 * time.Second)
+
+		ginkgo.By("checking the resizing result")
+		newPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(ctx, tpod.pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
+		if err != nil {
+			framework.ExpectNoError(err, fmt.Sprintf("failed to get new pvc(%s): %v", pvcName, err))
+		}
+		newSize := newPvc.Spec.Resources.Requests["storage"]
+		if !newSize.Equal(updatedSize) {
+			framework.Failf("newSize(%+v) is not equal to updatedSize(%+v)", newSize, updatedSize)
+		}
+
+		ginkgo.By("checking the resizing PV result")
+		newPv, err := client.CoreV1().PersistentVolumes().Get(ctx, updatedPvc.Spec.VolumeName, metav1.GetOptions{})
+		framework.ExpectNoError(err, fmt.Sprintf("failed to get pv(%s): %v", updatedPvc.Spec.VolumeName, err))
+		newPvSize := newPv.Spec.Capacity["storage"]
+
+		// compare quantities by value so different renderings (e.g. "11Gi" vs bytes) still match
+		if !newPvSize.Equal(newSize) {
+			framework.Failf("newPVSize(%+v) is not equal to newPVCSize(%+v)", newPvSize.String(), newSize.String())
+		}
+	}
+}
diff --git a/test/external-e2e/testdriver.yaml b/test/external-e2e/testdriver.yaml
index 6029fca0883..5d66de3c623 100644
--- a/test/external-e2e/testdriver.yaml
+++ b/test/external-e2e/testdriver.yaml
@@ -12,5 +12,7 @@ DriverInfo:
     multipods: true
     RWX: true
     fsGroup: true
+    controllerExpansion: true
+    nodeExpansion: true
     volumeMountGroup: true
     pvcDataSource: true
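
Reviewer note: a minimal sketch of how to exercise the new expansion path by hand, outside the e2e suite. It assumes a StorageClass that sets allowVolumeExpansion: true (the external resizer only acts on claims whose class sets this field) and an already-bound PVC named "pvc-smb"; both names are hypothetical.

    # Grow the claim; the csi-resizer sidecar observes the change and calls the
    # driver's new ControllerExpandVolume, which acknowledges the requested size
    # (no real quota is enforced on the SMB share).
    kubectl patch pvc pvc-smb --type merge -p '{"spec":{"resources":{"requests":{"storage":"20Gi"}}}}'

    # Once the resizer updates the PV, the claim's status should reflect the new size.
    kubectl get pvc pvc-smb -o jsonpath='{.status.capacity.storage}'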