From fe363d4bdabdc2ecf78d5edccb395b1c3bcb1c51 Mon Sep 17 00:00:00 2001 From: shlokshah-dev Date: Thu, 18 Aug 2022 12:52:06 -0400 Subject: [PATCH 1/3] chore: added unit tests for SharedState --- pkg/controller/replica_test.go | 108 ------ pkg/controller/shared_state.go | 5 +- pkg/controller/shared_state_test.go | 513 ++++++++++++++++++++++++++++ 3 files changed, 517 insertions(+), 109 deletions(-) create mode 100644 pkg/controller/shared_state_test.go diff --git a/pkg/controller/replica_test.go b/pkg/controller/replica_test.go index 3b947ad624..0a63d2a676 100644 --- a/pkg/controller/replica_test.go +++ b/pkg/controller/replica_test.go @@ -23,9 +23,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -285,109 +283,3 @@ func TestReplicaReconcile(t *testing.T) { }) } } - -func TestGetNodesForReplica(t *testing.T) { - tests := []struct { - description string - volumes []string - pods []v1.Pod - setupFunc func(*testing.T, *gomock.Controller) *ReconcileReplica - verifyFunc func(*testing.T, []string, error) - }{ - { - description: "[Success] Should not select nodes with no remaining capacity.", - volumes: []string{testPersistentVolume0Name}, - pods: []v1.Pod{testPod0}, - setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *ReconcileReplica { - replicaAttachment := testReplicaAzVolumeAttachment - now := metav1.Time{Time: metav1.Now().Add(-1000)} - replicaAttachment.DeletionTimestamp = &now - - newVolume := testAzVolume0.DeepCopy() - newVolume.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ - VolumeID: testManagedDiskURI0, - } - - newNode := testNode0.DeepCopy() - newNode.Status.Allocatable[consts.AttachableVolumesField] = resource.MustParse("0") - - controller := NewTestReplicaController( - mockCtl, - 
testNamespace, - newVolume, - &testPersistentVolume0, - newNode, - &testNode2, - &testPod0, - ) - - mockClients(controller.cachedClient.(*mockclient.MockClient), controller.azClient, controller.kubeClient) - return controller - }, - verifyFunc: func(t *testing.T, nodes []string, err error) { - require.NoError(t, err) - require.Len(t, nodes, 1) - require.NotEqual(t, nodes[0], testNode0Name) - }, - }, - { - description: "[Success] Should not create replica attachment on a node that does not match volume's node affinity rule", - volumes: []string{testPersistentVolume0Name}, - pods: []v1.Pod{testPod0}, - setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *ReconcileReplica { - replicaAttachment := testReplicaAzVolumeAttachment - now := metav1.Time{Time: metav1.Now().Add(-1000)} - replicaAttachment.DeletionTimestamp = &now - - newPV := testPersistentVolume0.DeepCopy() - newPV.Spec.NodeAffinity = &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{{ - MatchExpressions: []v1.NodeSelectorRequirement{{ - Key: consts.TopologyRegionKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{"westus2"}, - }}, - }}, - }, - } - - newVolume := testAzVolume0.DeepCopy() - newVolume.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ - VolumeID: testManagedDiskURI0, - } - - newNode := testNode0.DeepCopy() - newNode.Labels = map[string]string{consts.TopologyRegionKey: "westus2"} - - controller := NewTestReplicaController( - mockCtl, - testNamespace, - newVolume, - newPV, - newNode, - &testNode1, - &testPod0, - ) - - mockClients(controller.cachedClient.(*mockclient.MockClient), controller.azClient, controller.kubeClient) - return controller - }, - verifyFunc: func(t *testing.T, nodes []string, err error) { - require.NoError(t, err) - require.Len(t, nodes, 1) - require.NotEqual(t, nodes[0], testNode1Name) - }, - }, - } - for _, test := range tests { - tt := test - t.Run(tt.description, func(t *testing.T) { - mockCtl := 
gomock.NewController(t) - defer mockCtl.Finish() - controller := tt.setupFunc(t, mockCtl) - nodes, err := controller.getRankedNodesForReplicaAttachments(context.TODO(), tt.volumes, tt.pods) - tt.verifyFunc(t, nodes, err) - }) - } -} diff --git a/pkg/controller/shared_state.go b/pkg/controller/shared_state.go index 0a596353d1..2177757af0 100644 --- a/pkg/controller/shared_state.go +++ b/pkg/controller/shared_state.go @@ -447,7 +447,10 @@ func (c *SharedState) addPod(ctx context.Context, pod *v1.Pod, updateOption upda } namespacedClaimName := getQualifiedName(pod.Namespace, volume.PersistentVolumeClaim.ClaimName) if _, ok := c.claimToVolumeMap.Load(namespacedClaimName); !ok { - w.Logger().V(5).Infof("Skipping Pod %s. Volume %s not csi. Driver: %+v", pod.Name, volume.Name, volume.CSI) + // Log message if the Pod status is Running + if pod.Status.Phase == v1.PodRunning { + w.Logger().V(5).Infof("Skipping Pod %s. Volume %s not csi. Driver: %+v", pod.Name, volume.Name, volume.CSI) + } continue } w.Logger().V(5).Infof("Pod %s. Volume %v is csi.", pod.Name, volume) diff --git a/pkg/controller/shared_state_test.go b/pkg/controller/shared_state_test.go new file mode 100644 index 0000000000..1e184f2e9d --- /dev/null +++ b/pkg/controller/shared_state_test.go @@ -0,0 +1,513 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + fakev1 "k8s.io/client-go/kubernetes/fake" + azdiskv1beta2 "sigs.k8s.io/azuredisk-csi-driver/pkg/apis/azuredisk/v1beta2" + azdiskfakes "sigs.k8s.io/azuredisk-csi-driver/pkg/apis/client/clientset/versioned/fake" + consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" + "sigs.k8s.io/azuredisk-csi-driver/pkg/controller/mockclient" +) + +func NewTestSharedState(controller *gomock.Controller, namespace string, objects ...runtime.Object) *SharedState { + azDiskObjs, kubeObjs := splitObjects(objects...) + testSharedState := initState(mockclient.NewMockClient(controller), azdiskfakes.NewSimpleClientset(azDiskObjs...), fakev1.NewSimpleClientset(kubeObjs...), objects...) + + return testSharedState +} + +func TestGetNodesForReplica(t *testing.T) { + tests := []struct { + description string + volumes []string + pods []v1.Pod + setupFunc func(*testing.T, *gomock.Controller) *ReconcileReplica + verifyFunc func(*testing.T, []string, error) + }{ + { + description: "[Success] Should not select nodes with no remaining capacity.", + volumes: []string{testPersistentVolume0Name}, + pods: []v1.Pod{testPod0}, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *ReconcileReplica { + replicaAttachment := testReplicaAzVolumeAttachment + now := metav1.Time{Time: metav1.Now().Add(-1000)} + replicaAttachment.DeletionTimestamp = &now + + newVolume := testAzVolume0.DeepCopy() + newVolume.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ + VolumeID: testManagedDiskURI0, + } + + newNode := testNode0.DeepCopy() + newNode.Status.Allocatable[consts.AttachableVolumesField] = resource.MustParse("0") + + controller := NewTestReplicaController( + mockCtl, + testNamespace, + newVolume, + 
&testPersistentVolume0, + newNode, + &testNode2, + &testPod0, + ) + + mockClients(controller.cachedClient.(*mockclient.MockClient), controller.azClient, controller.kubeClient) + return controller + }, + verifyFunc: func(t *testing.T, nodes []string, err error) { + require.NoError(t, err) + require.Len(t, nodes, 1) + require.NotEqual(t, nodes[0], testNode0Name) + }, + }, + { + description: "[Success] Should not create replica attachment on a node that does not match volume's node affinity rule", + volumes: []string{testPersistentVolume0Name}, + pods: []v1.Pod{testPod0}, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *ReconcileReplica { + replicaAttachment := testReplicaAzVolumeAttachment + now := metav1.Time{Time: metav1.Now().Add(-1000)} + replicaAttachment.DeletionTimestamp = &now + + newPV := testPersistentVolume0.DeepCopy() + newPV.Spec.NodeAffinity = &v1.VolumeNodeAffinity{ + Required: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{{ + MatchExpressions: []v1.NodeSelectorRequirement{{ + Key: consts.TopologyRegionKey, + Operator: v1.NodeSelectorOpIn, + Values: []string{"westus2"}, + }}, + }}, + }, + } + + newVolume := testAzVolume0.DeepCopy() + newVolume.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ + VolumeID: testManagedDiskURI0, + } + + newNode := testNode0.DeepCopy() + newNode.Labels = map[string]string{consts.TopologyRegionKey: "westus2"} + + controller := NewTestReplicaController( + mockCtl, + testNamespace, + newVolume, + newPV, + newNode, + &testNode1, + &testPod0, + ) + + mockClients(controller.cachedClient.(*mockclient.MockClient), controller.azClient, controller.kubeClient) + return controller + }, + verifyFunc: func(t *testing.T, nodes []string, err error) { + require.NoError(t, err) + require.Len(t, nodes, 1) + require.NotEqual(t, nodes[0], testNode1Name) + }, + }, + } + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() 
+ controller := tt.setupFunc(t, mockCtl) + nodes, err := controller.getRankedNodesForReplicaAttachments(context.TODO(), tt.volumes, tt.pods) + tt.verifyFunc(t, nodes, err) + }) + } +} + +func TestRecoveryLifecycle(t *testing.T) { + tests := []struct { + description string + setupFunc func(*testing.T, *gomock.Controller) *SharedState + verifyFunc func(*testing.T, bool) + }{ + { + description: "[Success] IsRecoveryComplete should return true after MarkRecoveryComplete is called", + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState(mockCtl, testNamespace) + testSharedState.MarkRecoveryComplete() + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, isRecoveryComplete bool) { + require.True(t, isRecoveryComplete) + }, + }, + } + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + sharedState := tt.setupFunc(t, mockCtl) + isRecoveryComplete := sharedState.isRecoveryComplete() + tt.verifyFunc(t, isRecoveryComplete) + }) + } +} + +func TestFilterNodes(t *testing.T) { + tests := []struct { + description string + nodes []v1.Node + volumes []string + pods []v1.Pod + setupFunc func(*testing.T, *gomock.Controller) *SharedState + verifyFunc func(*testing.T, []v1.Node, error) + }{ + { + description: "[Failure] Should throw error when AzVolume not found", + nodes: []v1.Node{testNode0}, + volumes: []string{"random-volume-name"}, + pods: []v1.Pod{testPod0}, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + newVolume := testAzVolume0.DeepCopy() + newVolume.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ + VolumeID: testManagedDiskURI0, + } + + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + newVolume, + &testPersistentVolume0, + &testNode2, 
+ &testPod0, + ) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, nodes []v1.Node, err error) { + require.Error(t, err) + require.Nil(t, nodes) + }, + }, + { + description: "[Success] Successfully filter the given nodes", + nodes: []v1.Node{testNode0, testNode1, testNode2}, + volumes: []string{testPersistentVolume0Name}, + pods: []v1.Pod{testPod0}, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + newVolume := testAzVolume0.DeepCopy() + newVolume.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ + VolumeID: testManagedDiskURI0, + } + + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + newVolume, + &testPersistentVolume0, + &testNode2, + &testPod0, + ) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, nodes []v1.Node, err error) { + require.NoError(t, err) + require.Len(t, nodes, 3) + }, + }, + } + + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + sharedState := tt.setupFunc(t, mockCtl) + nodes, err := sharedState.filterNodes(context.TODO(), tt.nodes, tt.pods, tt.volumes) + tt.verifyFunc(t, nodes, err) + }) + } +} + +func TestGetVolumesFromPod(t *testing.T) { + tests := []struct { + description string + podName string + setupFunc func(*testing.T, *gomock.Controller) *SharedState + verifyFunc func(*testing.T, []string, error) + }{ + { + description: "[Failure] Should throw an error when PodName is not present in the podToClaims map", + podName: "Random-Pod-Name", + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + &testPersistentVolume0, + &testNode2, + 
&testPod0, + ) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, volumes []string, err error) { + require.Error(t, err) + require.Nil(t, volumes) + }, + }, + { + description: "[Failure] Should throw an error when value in podToClaimsMap is not of type []string", + podName: "test-pod-0", + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + &testPod0, + ) + testSharedState.podToClaimsMap.Store(testPod0Name, 1) // Inserting a value which is not of type []string + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, volumes []string, err error) { + require.Error(t, err) + require.Nil(t, volumes) + }, + }, + { + description: "[Failure] Should throw an error when volume in claimToVolumeMap is not of type string", + podName: "test-pod-0", + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + &testPod0, + ) + testSharedState.podToClaimsMap.Store(testPod0Name, []string{testPersistentVolumeClaim0Name, testPersistentVolumeClaim1Name}) + testSharedState.claimToVolumeMap.Store(testPersistentVolumeClaim0Name, 1) // Inserting a value which is not of type string + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, volumes []string, err error) { + require.Error(t, err) + require.Nil(t, volumes) + }, + }, + { + description: "[Sucess] Sucessfully return Volumes for the given podName", + podName: "test-pod-0", + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := 
NewTestSharedState( + mockCtl, + testNamespace, + &testPod0, + ) + testSharedState.podToClaimsMap.Store(testPod0Name, []string{testPersistentVolumeClaim0Name, testPersistentVolumeClaim1Name}) + testSharedState.claimToVolumeMap.Store(testPersistentVolumeClaim0Name, testPersistentVolume0Name) + testSharedState.claimToVolumeMap.Store(testPersistentVolumeClaim1Name, testPersistentVolume1Name) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, volumes []string, err error) { + require.NoError(t, err) + require.Len(t, volumes, 2) + }, + }, + } + + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + sharedState := tt.setupFunc(t, mockCtl) + volumes, err := sharedState.getVolumesFromPod(context.TODO(), test.podName) + tt.verifyFunc(t, volumes, err) + }) + } +} + +func TestGetPodsFromVolume(t *testing.T) { + tests := []struct { + description string + volumeName string + setupFunc func(*testing.T, *gomock.Controller) *SharedState + verifyFunc func(*testing.T, []v1.Pod, error) + }{ + { + description: "[Failure] Should return an error when Volume name is not found", + volumeName: "invalid-volume", + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + &testPod0, + ) + testSharedState.podToClaimsMap.Store(testPod0Name, []string{testPersistentVolumeClaim0Name, testPersistentVolumeClaim1Name}) + testSharedState.claimToVolumeMap.Store(testPersistentVolumeClaim0Name, testPersistentVolume0Name) + testSharedState.claimToVolumeMap.Store(testPersistentVolumeClaim1Name, testPersistentVolume1Name) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: 
func(t *testing.T, pods []v1.Pod, err error) { + require.Error(t, err) + require.Nil(t, pods) + }, + }, + { + description: "[Success] Should return Pods for valid Volume", + volumeName: testPersistentVolume0Name, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + &testPod0, + &testPersistentVolume0, + ) + podName := testNamespace + "/" + testPod0Name // Pod's qualified name is of the format <namespace>/<podName> + + testSharedState.volumeToClaimMap.Store(testPersistentVolume0Name, testPersistentVolumeClaim0Name) + testSharedState.claimToPodsMap.Store(testPersistentVolumeClaim0Name, newLockableEntry(set{podName: {}})) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, pods []v1.Pod, err error) { + require.NoError(t, err) + require.Len(t, pods, 1) + }, + }, + } + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + sharedState := tt.setupFunc(t, mockCtl) + pods, err := sharedState.getPodsFromVolume(context.TODO(), sharedState.cachedClient, test.volumeName) + tt.verifyFunc(t, pods, err) + }) + } +} + +func TestVolumeVisitedFlow(t *testing.T) { + tests := []struct { + description string + azVolumeName string + setupFunc func(*testing.T, *gomock.Controller) *SharedState + verifyFunc func(*testing.T, bool) + }{ + { + description: "[Success] isVolumeVisited should return true after markVolumeVisited is called", + azVolumeName: testPersistentVolume0Name, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState(mockCtl, testNamespace) + testSharedState.markVolumeVisited(testPersistentVolume0Name) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, 
testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, isVolumeVisited bool) { + require.True(t, isVolumeVisited) + }, + }, + { + description: "[Success] isVolumeVisited should return false after unmarkVolumeVisited is called", + azVolumeName: testPersistentVolume0Name, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + testSharedState := NewTestSharedState(mockCtl, testNamespace) + testSharedState.markVolumeVisited(testPersistentVolume0Name) + testSharedState.unmarkVolumeVisited(testPersistentVolume0Name) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, isVolumeVisited bool) { + require.False(t, isVolumeVisited) + }, + }, + } + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + sharedState := tt.setupFunc(t, mockCtl) + isVolumeVisited := sharedState.isVolumeVisited(tt.azVolumeName) + tt.verifyFunc(t, isVolumeVisited) + }) + } +} + +func TestGetNodesWithReplica(t *testing.T) { + tests := []struct { + description string + volumeName string + setupFunc func(*testing.T, *gomock.Controller) *SharedState + verifyFunc func(*testing.T, []string, error) + }{ + { + description: "[Success] Should return Nodes with AzVolumeAttachments for a valid volume", + volumeName: testPersistentVolume0Name, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + + newNode := testNode0.DeepCopy() + + testSharedState := NewTestSharedState(mockCtl, + testNamespace, + newNode, + &testPersistentVolume0, + ) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, nodes []string, err error) { + require.NoError(t, err) + require.NotNil(t, nodes) + }, 
+ }, + } + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + sharedState := tt.setupFunc(t, mockCtl) + nodes, err := sharedState.getNodesWithReplica(context.TODO(), tt.volumeName) + tt.verifyFunc(t, nodes, err) + }) + } +} From 47e10237ce6b5962fef0034057c9503bbf00119c Mon Sep 17 00:00:00 2001 From: shlokshah-dev Date: Thu, 18 Aug 2022 13:24:24 -0400 Subject: [PATCH 2/3] fixed spell check issues --- pkg/controller/shared_state_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/shared_state_test.go b/pkg/controller/shared_state_test.go index 1e184f2e9d..9e396a304b 100644 --- a/pkg/controller/shared_state_test.go +++ b/pkg/controller/shared_state_test.go @@ -324,7 +324,7 @@ func TestGetVolumesFromPod(t *testing.T) { }, }, { - description: "[Sucess] Sucessfully return Volumes for the given podName", + description: "[Success] Successfully return Volumes for the given podName", podName: "test-pod-0", setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { testSharedState := NewTestSharedState( From 769d6bab5715821279760ffc67fbf49d303d6562 Mon Sep 17 00:00:00 2001 From: shlokshah-dev Date: Mon, 22 Aug 2022 11:22:55 -0700 Subject: [PATCH 3/3] chore: added more unit tests for SharedState --- pkg/controller/shared_state_test.go | 106 +++++++++++++++++++++++++--- 1 file changed, 97 insertions(+), 9 deletions(-) diff --git a/pkg/controller/shared_state_test.go b/pkg/controller/shared_state_test.go index 9e396a304b..39ee3eb0cc 100644 --- a/pkg/controller/shared_state_test.go +++ b/pkg/controller/shared_state_test.go @@ -45,14 +45,14 @@ func TestGetNodesForReplica(t *testing.T) { description string volumes []string pods []v1.Pod - setupFunc func(*testing.T, *gomock.Controller) *ReconcileReplica + setupFunc func(*testing.T, *gomock.Controller) *SharedState verifyFunc func(*testing.T, []string, error) }{ { description: 
"[Success] Should not select nodes with no remaining capacity.", volumes: []string{testPersistentVolume0Name}, pods: []v1.Pod{testPod0}, - setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *ReconcileReplica { + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { replicaAttachment := testReplicaAzVolumeAttachment now := metav1.Time{Time: metav1.Now().Add(-1000)} replicaAttachment.DeletionTimestamp = &now @@ -65,7 +65,7 @@ func TestGetNodesForReplica(t *testing.T) { newNode := testNode0.DeepCopy() newNode.Status.Allocatable[consts.AttachableVolumesField] = resource.MustParse("0") - controller := NewTestReplicaController( + testSharedState := NewTestSharedState( mockCtl, testNamespace, newVolume, @@ -75,8 +75,8 @@ func TestGetNodesForReplica(t *testing.T) { &testPod0, ) - mockClients(controller.cachedClient.(*mockclient.MockClient), controller.azClient, controller.kubeClient) - return controller + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState }, verifyFunc: func(t *testing.T, nodes []string, err error) { require.NoError(t, err) @@ -88,7 +88,7 @@ func TestGetNodesForReplica(t *testing.T) { description: "[Success] Should not create replica attachment on a node that does not match volume's node affinity rule", volumes: []string{testPersistentVolume0Name}, pods: []v1.Pod{testPod0}, - setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *ReconcileReplica { + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { replicaAttachment := testReplicaAzVolumeAttachment now := metav1.Time{Time: metav1.Now().Add(-1000)} replicaAttachment.DeletionTimestamp = &now @@ -114,7 +114,7 @@ func TestGetNodesForReplica(t *testing.T) { newNode := testNode0.DeepCopy() newNode.Labels = map[string]string{consts.TopologyRegionKey: "westus2"} - controller := NewTestReplicaController( + testSharedState := NewTestSharedState( mockCtl, testNamespace, 
newVolume, @@ -124,8 +124,8 @@ func TestGetNodesForReplica(t *testing.T) { &testPod0, ) - mockClients(controller.cachedClient.(*mockclient.MockClient), controller.azClient, controller.kubeClient) - return controller + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState }, verifyFunc: func(t *testing.T, nodes []string, err error) { require.NoError(t, err) @@ -511,3 +511,91 @@ func TestGetNodesWithReplica(t *testing.T) { }) } } + +func TestFilterAndSortNodes(t *testing.T) { + tests := []struct { + description string + nodes []v1.Node + volumes []string + pods []v1.Pod + setupFunc func(*testing.T, *gomock.Controller) *SharedState + verifyFunc func(*testing.T, []v1.Node, error) + }{ + { + description: "[Failure] Should throw error when AzVolume not found", + nodes: []v1.Node{testNode0}, + volumes: []string{"random-volume-name"}, + pods: []v1.Pod{testPod0}, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + newVolume := testAzVolume0.DeepCopy() + newVolume.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ + VolumeID: testManagedDiskURI0, + } + + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + newVolume, + &testPersistentVolume0, + &testNode2, + &testPod0, + ) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, nodes []v1.Node, err error) { + require.Error(t, err) + require.Nil(t, nodes) + }, + }, + { + description: "[Success] Should return filtered and sorted list of nodes when AzVolume is valid", + nodes: []v1.Node{testNode0, testNode1}, + volumes: []string{testPersistentVolume0Name, testPersistentVolume1Name}, + pods: []v1.Pod{testPod0, testPod1}, + setupFunc: func(t *testing.T, mockCtl *gomock.Controller) *SharedState { + newVolume := testAzVolume0.DeepCopy() + newVolume.Status.Detail = 
&azdiskv1beta2.AzVolumeStatusDetail{ + VolumeID: testManagedDiskURI0, + } + + newVolume1 := testAzVolume1.DeepCopy() + newVolume1.Status.Detail = &azdiskv1beta2.AzVolumeStatusDetail{ + VolumeID: testManagedDiskURI0, + } + + testSharedState := NewTestSharedState( + mockCtl, + testNamespace, + newVolume, + newVolume1, + &testPersistentVolume0, + &testPersistentVolume1, + &testNode0, + &testNode1, + &testPod0, + &testPod1, + ) + + mockClients(testSharedState.cachedClient.(*mockclient.MockClient), testSharedState.azClient, testSharedState.kubeClient) + return testSharedState + }, + verifyFunc: func(t *testing.T, nodes []v1.Node, err error) { + require.NoError(t, err) + require.Len(t, nodes, 2) + }, + }, + } + + for _, test := range tests { + tt := test + t.Run(tt.description, func(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + sharedState := tt.setupFunc(t, mockCtl) + nodes, err := sharedState.filterAndSortNodes(context.TODO(), tt.nodes, tt.pods, tt.volumes) + tt.verifyFunc(t, nodes, err) + }) + } +}