From 971a489281805ceeee22d769f37fdcfa082b4614 Mon Sep 17 00:00:00 2001
From: lukeatdell <115811384+lukeatdell@users.noreply.github.com>
Date: Thu, 21 Nov 2024 14:18:37 -0600
Subject: [PATCH 01/15] Add availability zone secret and struct (#352)

* add availability zone secret and struct
---
 samples/secret.yaml | 31 +++++++++++++++++++++++++++++-
 service/service.go  | 47 ++++++++++++++++++++++++++++---------------
 2 files changed, 61 insertions(+), 17 deletions(-)

diff --git a/samples/secret.yaml b/samples/secret.yaml
index 98597933..7a564952 100644
--- a/samples/secret.yaml
+++ b/samples/secret.yaml
@@ -33,10 +33,39 @@
 #   Default value: ""
 #   This is an optional field from v2.10.0 onwards for PowerFlex storage system >=4.0.x
   nasName: "nas-server"
-# # To add more PowerFlex systems, uncomment the following lines and provide the required values
+
+  # # zone: A cluster availability zone to which the PowerFlex system should be bound.
+  # # The mapping is one-to-one - the PowerFlex system cannot belong to more than one zone.
+  # # Ideally, the PowerFlex system and cluster nodes that define the availability zone would be
+  # # geographically co-located.
+  # # Optional: true
+  # # Default value: none
+  # zone:
+  #   # name: The name of the container orchestrator's availability zone to which the PowerFlex system
+  #   # should be mapped.
+  #   name: "zoneA"
+  #   # protectionDomains: A list of the protection domains and their associated pools, defined in
+  #   # the PowerFlex system.
+  #   # Currently, csi-powerflex only supports one protection domain per zone.
+  #   protectionDomains:
+  #     # pools: A list of pools that belong to a single protection domain defined in the PowerFlex system.
+  #     # Currently, csi-powerflex only supports one pool per protection domain.
+  #     - pools:
+  #         - "pool1"
+  #       # name: The name of the protection domain in the PowerFlex system.
+  #       # Optional: true
+  #       # name is required if storage pool names are not unique across protection domains.
+  #       name: "domain1"
+# To add more PowerFlex systems, uncomment the following lines and provide the required values
 # - username: "admin"
 #   password: "password"
 #   systemID: "2b11bb111111bb1b"
 #   endpoint: "https://127.0.0.2"
 #   skipCertificateValidation: true
 #   mdm: "10.0.0.3,10.0.0.4"
+#   zone:
+#     name: "zoneB"
+#     protectionDomains:
+#       - name: "my-app-domain"
+#         pools:
+#           - "my-backend-pool"
diff --git a/service/service.go b/service/service.go
index 9df53977..cf023a1e 100644
--- a/service/service.go
+++ b/service/service.go
@@ -107,22 +107,37 @@ var Log = logrus.New()
 
 // ArrayConnectionData contains data required to connect to array
 type ArrayConnectionData struct {
-	SystemID                  string   `json:"systemID"`
-	Username                  string   `json:"username"`
-	Password                  string   `json:"password"`
-	Endpoint                  string   `json:"endpoint"`
-	SkipCertificateValidation bool     `json:"skipCertificateValidation,omitempty"`
-	Insecure                  bool     `json:"insecure,omitempty"`
-	IsDefault                 bool     `json:"isDefault,omitempty"`
-	AllSystemNames            string   `json:"allSystemNames"`
-	NasName                   string   `json:"nasName"`
-	Mdm                       string   `json:"mdm,omitempty"`
-	Zone                      ZoneInfo `json:"zone,omitempty"`
-}
-
-type ZoneInfo struct {
-	Name     string `json:"name"`
-	LabelKey string `json:"labelKey"`
+	SystemID                  string            `json:"systemID"`
+	Username                  string            `json:"username"`
+	Password                  string            `json:"password"`
+	Endpoint                  string            `json:"endpoint"`
+	SkipCertificateValidation bool              `json:"skipCertificateValidation,omitempty"`
+	Insecure                  bool              `json:"insecure,omitempty"`
+	IsDefault                 bool              `json:"isDefault,omitempty"`
+	AllSystemNames            string            `json:"allSystemNames"`
+	NasName                   string            `json:"nasName"`
+	Mdm                       string            `json:"mdm,omitempty"`
+	AvailabilityZone          *AvailabilityZone `json:"zone,omitempty"`
+}
+
+// Definitions to make AvailabilityZone decomposition easier to read.
+type (
+	ZoneName             string
+	ZoneTargetMap        map[ZoneName][]ProtectionDomain
+	ProtectionDomainName string
+	PoolName             string
+)
+
+// AvailabilityZone provides a mapping between cluster zone labels and storage systems
+type AvailabilityZone struct {
+	Name              ZoneName           `json:"name"`
+	ProtectionDomains []ProtectionDomain `json:"protectionDomains"`
+}
+
+// ProtectionDomain provides protection domain information for a cluster's availability zone
+type ProtectionDomain struct {
+	Name  ProtectionDomainName `json:"name"`
+	Pools []PoolName           `json:"pools"`
 }
 
 // Manifest is the SP's manifest.
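With patch 01 applied, each array entry in the mounted secret can carry an optional `zone` block, and it decodes through the json-tagged structs above. As a quick sanity check of the new shape, here is a minimal, self-contained sketch that round-trips one zoned entry with sigs.k8s.io/yaml (the same decoder the e2e helpers later in this series use). The trimmed struct copies and all sample values below are illustrative stand-ins, not driver code:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// Trimmed stand-ins for the service types added by this patch,
// limited to the fields exercised in this sketch.
type ProtectionDomain struct {
	Name  string   `json:"name"`
	Pools []string `json:"pools"`
}

type AvailabilityZone struct {
	Name              string             `json:"name"`
	ProtectionDomains []ProtectionDomain `json:"protectionDomains"`
}

type ArrayConnectionData struct {
	SystemID         string            `json:"systemID"`
	Endpoint         string            `json:"endpoint"`
	AvailabilityZone *AvailabilityZone `json:"zone,omitempty"`
}

func main() {
	// One zoned entry in the same shape as samples/secret.yaml.
	config := []byte(`
- systemID: "2b11bb111111bb1b"
  endpoint: "https://127.0.0.2"
  zone:
    name: "zoneB"
    protectionDomains:
      - name: "my-app-domain"
        pools:
          - "my-backend-pool"
`)

	var arrays []ArrayConnectionData
	// sigs.k8s.io/yaml converts YAML to JSON first, so the json tags above apply.
	if err := yaml.Unmarshal(config, &arrays); err != nil {
		panic(err)
	}

	for _, a := range arrays {
		// zone is optional; unzoned systems leave the pointer nil.
		if a.AvailabilityZone == nil {
			continue
		}
		// Assumes one protection domain with one pool, matching the
		// current csi-powerflex limits documented in the sample comments.
		pd := a.AvailabilityZone.ProtectionDomains[0]
		fmt.Printf("system %s -> zone %s, domain %s, pool %s\n",
			a.SystemID, a.AvailabilityZone.Name, pd.Name, pd.Pools[0])
	}
}
```

Because `zone` stays a nil pointer for unzoned systems, mixed secrets keep working; the zone handling added later in the series keys off that nil check.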
From 5578a0496d62faa4201f7c32832a1707d1acaf01 Mon Sep 17 00:00:00 2001
From: Fernando Alfaro Campos
Date: Fri, 22 Nov 2024 13:12:45 -0500
Subject: [PATCH 02/15] Add multi availability zone storage class (#353)

---
 samples/storageclass/storageclass-az.yaml | 54 +++++++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 samples/storageclass/storageclass-az.yaml

diff --git a/samples/storageclass/storageclass-az.yaml b/samples/storageclass/storageclass-az.yaml
new file mode 100644
index 00000000..c58a6974
--- /dev/null
+++ b/samples/storageclass/storageclass-az.yaml
@@ -0,0 +1,54 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: vxflexos-az
+provisioner: csi-vxflexos.dellemc.com
+# reclaimPolicy: PVs that are dynamically created by a StorageClass will have the reclaim policy specified here
+# Allowed values:
+#   Retain: retain the PV after PVC deletion
+#   Delete: delete the PV after PVC deletion
+# Optional: true
+# Default value: Delete
+reclaimPolicy: Delete
+# allowVolumeExpansion: allows the users to resize the volume by editing the corresponding PVC object
+# Allowed values:
+#   true: allow users to resize the PVC
+#   false: does not allow users to resize the PVC
+# Optional: true
+# Default value: false
+allowVolumeExpansion: true
+parameters:
+  # Filesystem type for volumes created by the storageclass
+  # Default value: None if defaultFsType is not mentioned in values.yaml
+  # Else defaultFsType value mentioned in values.yaml
+  # will be used as default value
+  csi.storage.k8s.io/fstype: xfs
+  # Limit the volume network bandwidth
+  # Value is a positive number in granularity of 1024 Kbps; 0 = unlimited
+  # Allowed values: one string for bandwidth limit in Kbps
+  # Optional: false
+  # Uncomment the line below if you want to use bandwidthLimitInKbps
+  # bandwidthLimitInKbps: # Insert bandwidth limit in Kbps
+  # Limit the volume IOPS
+  # The number of IOPS must be greater than 10; 0 = unlimited
+  # Allowed values: one string for iops limit
+  # Optional: false
+  # Uncomment the line below if you want to use iopsLimit
+  # iopsLimit: # Insert iops limit
+# volumeBindingMode determines how volume binding and dynamic provisioning should occur
+# Allowed values:
+#   Immediate: volume binding and dynamic provisioning occurs once the PVC is created
+#   WaitForFirstConsumer: delay the binding and provisioning of a PV until a pod using the PVC is created
+# Optional: false
+# Default value: WaitForFirstConsumer (required for the topology section below)
+volumeBindingMode: WaitForFirstConsumer
+# allowedTopologies helps schedule pods on worker nodes that match all of the expressions below.
+# By providing the zone key, the scheduler can make sure that pods are scheduled in the same zone.
+# Note: The node must be labeled with the key zone.csi-vxflexos.dellemc.com and a single
+# associated zone value.
+allowedTopologies:
+  - matchLabelExpressions:
+      - key: zone.csi-vxflexos.dellemc.com
+        values:
+          - zoneA
+          - zoneB
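On the CSI side of this storage class, `volumeBindingMode: WaitForFirstConsumer` means the external-provisioner resolves the selected node's zone label into topology segments and passes them to the driver's CreateVolume call as preferred topologies. Below is a minimal sketch of that request shape built with the CSI spec Go types; the volume name is a hypothetical placeholder, and the zone values mirror the sample above (a unit-test helper added later in this series builds essentially the same request):

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// Preferred topologies as the provisioner would derive them from the
	// node labels admitted by the storage class's allowedTopologies.
	req := &csi.CreateVolumeRequest{
		Name: "vol-zoned-example", // hypothetical volume name
		AccessibilityRequirements: &csi.TopologyRequirement{
			Preferred: []*csi.Topology{
				{Segments: map[string]string{"zone.csi-vxflexos.dellemc.com": "zoneA"}},
				{Segments: map[string]string{"zone.csi-vxflexos.dellemc.com": "zoneB"}},
			},
		},
	}

	for _, topo := range req.AccessibilityRequirements.GetPreferred() {
		fmt.Println(topo.GetSegments())
	}
}
```

Listing both zones under Preferred gives the driver an ordered fallback: the zone-matching loop added in the next patch walks this list and stops at the first zone it can serve.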
From bc79b283c84044833e9505ed1c2287f01607d324 Mon Sep 17 00:00:00 2001
From: Fernando Alfaro Campos
Date: Thu, 28 Nov 2024 15:38:51 -0500
Subject: [PATCH 03/15] Add Multi-Available Zone Functionality (#354)

* Add create volume processing for multi-az
* Adjust multi-az secret processing
* Rework secret processing
* Iterate through all topologies for a zone
* Test signing commit
* Cleanup secret retrieval and backwards compatibility of storage classes
* adapt to use new secret struct
* Add multi-az unit tests
* Fix golangci-lint issue
* Cleanup code logs
* Make zone take precedence during NodeGetInfo
* Update unit tests credentials

---------

Co-authored-by: Lau, Luke
---
 service/controller.go                          | 162 +++++++++++++---
 service/features/array-config/config           |   8 +-
 service/features/array-config/config.2         |  12 +-
 .../features/array-config/duplicate_system_ID  |   4 +-
 .../features/array-config/invalid_endpoint     |   2 +-
 .../features/array-config/invalid_multi_az     |  21 +++
 .../features/array-config/invalid_password     |   2 +-
 .../features/array-config/invalid_system_name  |   4 +-
 .../features/array-config/invalid_username     |   2 +-
 service/features/array-config/multi_az         |  40 +++++
 .../features/array-config/replication-config   |   8 +-
 .../features/array-config/two_default_array    |   4 +-
 .../features/get_storage_pool_instances.json   |   2 +-
 service/features/service.feature               |  20 ++-
 service/node.go                                |  30 +++-
 service/step_defs_test.go                      |  83 ++++++++-
 test/integration/wrong_config.json             |   4 +-
 17 files changed, 343 insertions(+), 65 deletions(-)
 create mode 100644 service/features/array-config/invalid_multi_az
 create mode 100644 service/features/array-config/multi_az

diff --git a/service/controller.go b/service/controller.go
index f83a78e5..7cd02b3c 100644
--- a/service/controller.go
+++ b/service/controller.go
@@ -155,20 +155,41 @@ const (
 
 var interestingParameters = [...]string{0: "FsType", 1: KeyMkfsFormatOption, 2: KeyBandwidthLimitInKbps, 3: KeyIopsLimit}
 
+type ZoneContent struct {
+	systemID         string
+	protectionDomain ProtectionDomainName
+	pool             PoolName
+}
+
 func (s *service) CreateVolume(
 	ctx context.Context, req *csi.CreateVolumeRequest) (
 	*csi.CreateVolumeResponse, error,
 ) {
 	params := req.GetParameters()
+	var systemID string
+	var err error
 
-	systemID, err := s.getSystemIDFromParameters(params)
-	if err != nil {
-		return nil, err
+	// This is a map of zone to the arrayID and pool identifier
+	zoneTargetMap := make(map[ZoneName]ZoneContent)
+
+	if _, ok := params[KeySystemID]; !ok {
+		zoneTargetMap = s.getZonesFromSecret()
 	}
 
-	if err := s.requireProbe(ctx, systemID); err != nil {
-		return nil, err
+	if len(zoneTargetMap) == 0 {
+		sid, err := s.getSystemIDFromParameters(params)
+		if err != nil {
+			return nil, err
+		}
+
+		systemID = sid
+	}
+
+	if systemID != "" {
+		if err := s.requireProbe(ctx, systemID); err != nil {
+			return nil, err
+		}
 	}
 
 	s.logStatistics()
@@ -191,9 +212,55 @@ func (s *service) CreateVolume(
 		Log.Printf("Received CreateVolume request without accessibility keys")
 	}
 
+	// Look for zone topology
+	zoneTopology := false
+	var storagePool string
+	var protectionDomain string
 	var volumeTopology []*csi.Topology
 	systemSegments := map[string]string{} // topology segments matching requested system for a volume
-	if accessibility != nil && len(accessibility.GetPreferred()) > 0 {
+
+	// Handle Zone topology, which happens when node is annotated with "Zone" label
+	if len(zoneTargetMap) != 0 && accessibility != nil && len(accessibility.GetPreferred()) > 0 {
+		for _, topo := range accessibility.GetPreferred() {
+			for topoLabel, zoneName := range topo.Segments {
+				if strings.HasPrefix(topoLabel, "zone."+Name) {
+					zoneTarget, ok := zoneTargetMap[ZoneName(zoneName)]
+					if !ok {
+						Log.Infof("no zone target for %s", zoneName)
+						continue
+					}
+
+					protectionDomain = string(zoneTarget.protectionDomain)
+					storagePool = string(zoneTarget.pool)
+					systemID = zoneTarget.systemID
+
+					Log.Infof("Preferred topology zone %s, systemID %s, protectionDomain %s, and storagePool %s", zoneName, systemID, protectionDomain, storagePool)
+
+					if err := s.requireProbe(ctx, systemID); err != nil {
+						Log.Errorln("Failed to probe system " + systemID)
+						continue
+					}
+
+					systemSegments["zone."+Name] = zoneName
+					volumeTopology = append(volumeTopology, &csi.Topology{
+						Segments: systemSegments,
+					})
+
+					zoneTopology = true
+				}
+			}
+
+			if zoneTopology {
+				break
+			}
+		}
+
+		if !zoneTopology {
+			return nil, status.Error(codes.InvalidArgument, "no zone topology found in accessibility requirements")
+		}
+	}
+
+	if !zoneTopology && accessibility != nil && len(accessibility.GetPreferred()) > 0 {
 		requestedSystem := ""
 		sID := ""
 		system := s.systems[systemID]
@@ -451,23 +518,33 @@ func (s *service) CreateVolume(
 	params = mergeStringMaps(params, req.GetSecrets())
 
 	// We require the storagePool name for creation
-	sp, ok := params[KeyStoragePool]
-	if !ok {
-		return nil, status.Errorf(codes.InvalidArgument,
-			"%s is a required parameter", KeyStoragePool)
-	}
+	if storagePool == "" {
+		sp, ok := params[KeyStoragePool]
+		if !ok {
+			return nil, status.Errorf(codes.InvalidArgument,
+				"%s is a required parameter", KeyStoragePool)
+		}
 
-	pdID := ""
-	pd, ok := params[KeyProtectionDomain]
-	if !ok {
-		Log.Printf("Protection Domain name not provided; there could be conflicts if two storage pools share a name")
+		storagePool = sp
 	} else {
-		pdID, err = s.getProtectionDomainIDFromName(systemID, pd)
-		if err != nil {
-			return nil, err
+		Log.Printf("[CreateVolume] Multi-AZ Storage Pool Determined by Secret %s", storagePool)
+	}
+
+	var pdID string
+	if protectionDomain == "" {
+		pd, ok := params[KeyProtectionDomain]
+		if !ok {
+			Log.Printf("Protection Domain name not provided; there could be conflicts if two storage pools share a name")
+		} else {
+			protectionDomain = pd
 		}
 	}
 
+	pdID, err = s.getProtectionDomainIDFromName(systemID, protectionDomain)
+	if err != nil {
+		return nil, err
+	}
+
 	volType := s.getVolProvisionType(params) // Thick or Thin
 
 	contentSource := req.GetVolumeContentSource()
@@ -475,12 +552,12 @@ func (s *service) CreateVolume(
 		volumeSource := contentSource.GetVolume()
 		if volumeSource != nil {
 			Log.Printf("volume %s specified as volume content source", volumeSource.VolumeId)
-			return s.Clone(req, volumeSource, name, size, sp)
+			return s.Clone(req, volumeSource, name, size, storagePool)
 		}
 		snapshotSource := contentSource.GetSnapshot()
 		if snapshotSource != nil {
 			Log.Printf("snapshot %s specified as volume content source", snapshotSource.SnapshotId)
-			return s.createVolumeFromSnapshot(req, snapshotSource, name, size, sp)
+			return s.createVolumeFromSnapshot(req, snapshotSource, name, size, storagePool)
 		}
 	}
 
@@ -489,7 +566,7 @@ func (s *service) CreateVolume(
 	fields := map[string]interface{}{
 		"name":        name,
 		"sizeInKiB":   size,
-		"storagePool": sp,
+		"storagePool": storagePool,
 		"volType":     volType,
 		HeaderPersistentVolumeName:      params[CSIPersistentVolumeName],
 		HeaderPersistentVolumeClaimName: params[CSIPersistentVolumeClaimName],
@@ 
-517,13 +594,13 @@ func (s *service) CreateVolume( Log.Println("warning: goscaleio.VolumeParam: no MetaData method exists, consider updating goscaleio library.") } - createResp, err := s.adminClients[systemID].CreateVolume(volumeParam, sp, pdID) + createResp, err := s.adminClients[systemID].CreateVolume(volumeParam, storagePool, pdID) if err != nil { // handle case where volume already exists if !strings.EqualFold(err.Error(), sioGatewayVolumeNameInUse) { - Log.Printf("error creating volume: %s pool %s error: %s", name, sp, err.Error()) + Log.Printf("error creating volume: %s pool %s error: %s", name, storagePool, err.Error()) return nil, status.Errorf(codes.Internal, - "error when creating volume %s storagepool %s: %s", name, sp, err.Error()) + "error when creating volume %s storagepool %s: %s", name, storagePool, err.Error()) } } @@ -548,7 +625,7 @@ func (s *service) CreateVolume( // since the volume could have already exists, double check that the // volume has the expected parameters - spID, err := s.getStoragePoolID(sp, systemID, pdID) + spID, err := s.getStoragePoolID(storagePool, systemID, pdID) if err != nil { return nil, status.Errorf(codes.Unavailable, "volume exists, but could not verify parameters: %s", @@ -737,6 +814,7 @@ func (s *service) getSystemIDFromParameters(params map[string]string) (string, e return "", status.Errorf(codes.FailedPrecondition, "No system ID is found in parameters or as default") } } + Log.Printf("getSystemIDFromParameters system %s", systemID) // if name set for array.SystemID use id instead @@ -748,6 +826,36 @@ func (s *service) getSystemIDFromParameters(params map[string]string) (string, e return systemID, nil } +// getZonesFromSecret returns a map with zone names as keys to zone content +// with zone content consisting of the PowerFlex systemID, protection domain and pool. +func (s *service) getZonesFromSecret() map[ZoneName]ZoneContent { + zoneTargetMap := make(map[ZoneName]ZoneContent) + + for _, array := range s.opts.arrays { + availabilityZone := array.AvailabilityZone + if availabilityZone == nil { + continue + } + + zone := availabilityZone.Name + + var pd ProtectionDomainName + if availabilityZone.ProtectionDomains[0].Name != "" { + pd = availabilityZone.ProtectionDomains[0].Name + } + + pool := availabilityZone.ProtectionDomains[0].Pools[0] + + zoneTargetMap[zone] = ZoneContent{ + systemID: array.SystemID, + protectionDomain: pd, + pool: pool, + } + } + + return zoneTargetMap +} + // Create a volume (which is actually a snapshot) from an existing snapshot. // The snapshotSource gives the SnapshotId which is the volume to be replicated. 
func (s *service) createVolumeFromSnapshot(req *csi.CreateVolumeRequest, @@ -2176,7 +2284,7 @@ func (s *service) getCapacityForAllSystems(ctx context.Context, protectionDomain var systemCapacity int64 var err error - if len(spName) > 0 { + if len(spName) > 0 && spName[0] != "" { systemCapacity, err = s.getSystemCapacity(ctx, array.SystemID, protectionDomain, spName[0]) } else { systemCapacity, err = s.getSystemCapacity(ctx, array.SystemID, "") @@ -2210,7 +2318,7 @@ func (s *service) GetCapacity( systemID := "" params := req.GetParameters() - if params == nil || len(params) == 0 { + if len(params) == 0 { // Get capacity of all systems capacity, err = s.getCapacityForAllSystems(ctx, "") } else { diff --git a/service/features/array-config/config b/service/features/array-config/config index 9f23dc0e..354b69df 100644 --- a/service/features/array-config/config +++ b/service/features/array-config/config @@ -1,8 +1,8 @@ [ { "endpoint": "http://127.0.0.1", - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "insecure": true, "isDefault": true, "systemID": "14dbbf5617523654", @@ -10,8 +10,8 @@ }, { "endpoint": "http://127.0.0.2", - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "skipCertificateValidation": true, "isDefault": false, "systemID": "15dbbf5617523655", diff --git a/service/features/array-config/config.2 b/service/features/array-config/config.2 index 3418f020..7d397fa5 100644 --- a/service/features/array-config/config.2 +++ b/service/features/array-config/config.2 @@ -1,8 +1,8 @@ [ { "endpoint": "http://127.0.0.1", - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "insecure": true, "isDefault": true, "systemID": "14dbbf5617523654" @@ -10,16 +10,16 @@ }, { "endpoint": "http://127.0.0.2", - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "insecure": true, "isDefault": false, "systemID": "15dbbf5617523655" "nasName": "dummy-nas-server" }, { - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "systemID": "1235e15806d1ec0f", "endpoint": "https://1.2.3.4", "insecure": true, diff --git a/service/features/array-config/duplicate_system_ID b/service/features/array-config/duplicate_system_ID index bf166f94..b88d0366 100644 --- a/service/features/array-config/duplicate_system_ID +++ b/service/features/array-config/duplicate_system_ID @@ -1,6 +1,6 @@ [ { - "username": "admin", + "username": "username", "password": "password", "systemID": "DUPLICATE", "endpoint": "https://107.0.0.1", @@ -9,7 +9,7 @@ "mdm": "10.0.0.1" }, { - "username": "admin", + "username": "username", "password": "password", "systemID": "DUPLICATE", "endpoint": "https://107.0.0.1", diff --git a/service/features/array-config/invalid_endpoint b/service/features/array-config/invalid_endpoint index 865aeca3..d4956508 100644 --- a/service/features/array-config/invalid_endpoint +++ b/service/features/array-config/invalid_endpoint @@ -1,7 +1,7 @@ [ { "endpoint": "", - "username": "admin", + "username": "username", "password": "password", "insecure": true, "isDefault": true, diff --git a/service/features/array-config/invalid_multi_az b/service/features/array-config/invalid_multi_az new file mode 100644 index 00000000..4a870465 --- /dev/null +++ b/service/features/array-config/invalid_multi_az @@ -0,0 +1,21 @@ +[ + { + "endpoint": "http://127.0.0.1", + "username": "username", + 
"password": "password", + "insecure": true, + "isDefault": true, + "systemID": "14dbbf5617523654", + "zone": { + "name": "notExistent", + "protectionDomains": [ + { + "name": "bad", + "pools": [ + "badPool" + ] + } + ] + } + } +] diff --git a/service/features/array-config/invalid_password b/service/features/array-config/invalid_password index 3ee67634..d6866d84 100644 --- a/service/features/array-config/invalid_password +++ b/service/features/array-config/invalid_password @@ -1,7 +1,7 @@ [ { "endpoint": "http://127.0.0.1", - "username": "admin", + "username": "username", "password": "", "insecure": true, "isDefault": true, diff --git a/service/features/array-config/invalid_system_name b/service/features/array-config/invalid_system_name index 01282a42..02ac7b0d 100644 --- a/service/features/array-config/invalid_system_name +++ b/service/features/array-config/invalid_system_name @@ -1,8 +1,8 @@ [ { "endpoint": "http://127.0.0.1", - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "insecure": true, "isDefault": true, "systemID": "" diff --git a/service/features/array-config/invalid_username b/service/features/array-config/invalid_username index 7b3026bb..c51c9b40 100644 --- a/service/features/array-config/invalid_username +++ b/service/features/array-config/invalid_username @@ -2,7 +2,7 @@ { "endpoint": "http://127.0.0.1", "username": "", - "password": "Password123", + "password": "password", "insecure": true, "isDefault": true, "systemID": "123" diff --git a/service/features/array-config/multi_az b/service/features/array-config/multi_az new file mode 100644 index 00000000..22fe8b99 --- /dev/null +++ b/service/features/array-config/multi_az @@ -0,0 +1,40 @@ +[ + { + "endpoint": "http://127.0.0.1", + "username": "username", + "password": "password", + "insecure": true, + "isDefault": true, + "systemID": "14dbbf5617523654", + "zone": { + "name": "zoneA", + "protectionDomains": [ + { + "name": "mocksystem", + "pools": [ + "viki_pool_HDD_20181031" + ] + } + ] + } + }, + { + "endpoint": "http://127.0.0.2", + "username": "username", + "password": "password", + "skipCertificateValidation": true, + "isDefault": false, + "systemID": "15dbbf5617523655", + "zone": { + "name": "zoneB", + "protectionDomains": [ + { + "name": "mocksystem", + "pools": [ + "viki_pool_HDD_20181031" + ] + } + ] + } + } +] diff --git a/service/features/array-config/replication-config b/service/features/array-config/replication-config index 354ad76e..907b33c8 100644 --- a/service/features/array-config/replication-config +++ b/service/features/array-config/replication-config @@ -1,16 +1,16 @@ [ { "endpoint": "http://127.0.0.1", - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "insecure": true, "isDefault": true, "systemID": "14dbbf5617523654" }, { "endpoint": "http://127.0.0.1", - "username": "admin", - "password": "Password123", + "username": "username", + "password": "password", "skipCertificateValidation": true, "isDefault": false, "systemID": "15dbbf5617523655" diff --git a/service/features/array-config/two_default_array b/service/features/array-config/two_default_array index 7a3d8e81..102f4720 100644 --- a/service/features/array-config/two_default_array +++ b/service/features/array-config/two_default_array @@ -1,6 +1,6 @@ [ { - "username": "admin", + "username": "username", "password": "password", "systemID": "321", "endpoint": "https://107.0.0.1", @@ -9,7 +9,7 @@ "mdm": "10.0.0.1" }, { - "username": "admin", + 
"username": "username", "password": "password", "systemID": "123", "endpoint": "https://107.0.0.2", diff --git a/service/features/get_storage_pool_instances.json b/service/features/get_storage_pool_instances.json index 44079420..52dd6f8e 100644 --- a/service/features/get_storage_pool_instances.json +++ b/service/features/get_storage_pool_instances.json @@ -10,7 +10,7 @@ "zeroPaddingEnabled": true, "backgroundScannerMode": "Disabled", "rebalanceIoPriorityPolicy": "favorAppIos", - "protectionDomainId": "b8b3919900000000", + "protectionDomainId": "14dbbf5617523654", "backgroundScannerBWLimitKBps": 0, "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, "sparePercentage": 10, diff --git a/service/features/service.feature b/service/features/service.feature index 5aaae654..39225fd9 100644 --- a/service/features/service.feature +++ b/service/features/service.feature @@ -1552,5 +1552,21 @@ Feature: VxFlex OS CSI interface Examples: | systemid | nasserver | error | errorMsg | | "15dbbf5617523655" | "63ec8e0d-4551-29a7-e79c-b202f2b914f3" | "" | "none" | - | "15dbbf5617523655" | "invalid-nas-server" | "NasNotFoundError" | "NAS server not found" | - + | "15dbbf5617523655" | "invalid-nas-server" | "NasNotFoundError" | "NAS server not found" | + + Scenario: Create Volume for multi-available zone + Given a VxFlexOS service + And I use config + When I call Probe + And I call CreateVolume with zones + Then the error contains + Examples: + | name | config | errorMsg | + | "volume1" | "multi_az" | "none" | + | "volume1" | "invalid_multi_az" | "no zone topology found in accessibility requirements" | + + Scenario: Call NodeGetInfo with zone label + Given a VxFlexOS service + And I use config "multi_az" + When I call NodeGetInfo with zone labels + Then a valid NodeGetInfo is returned with node topology diff --git a/service/node.go b/service/node.go index a3e21cb8..1674c204 100644 --- a/service/node.go +++ b/service/node.go @@ -733,16 +733,15 @@ func (s *service) NodeGetInfo( } } + labels, err := GetNodeLabels(ctx, s) + if err != nil { + return nil, err + } + var maxVxflexosVolumesPerNode int64 if len(connectedSystemID) != 0 { // Check for node label 'max-vxflexos-volumes-per-node'. If present set 'MaxVolumesPerNode' to this value. // If node label is not present, set 'MaxVolumesPerNode' to default value i.e., 0 - - labels, err := GetNodeLabels(ctx, s) - if err != nil { - return nil, err - } - if val, ok := labels[maxVxflexosVolumesPerNodeLabel]; ok { maxVxflexosVolumesPerNode, err = strconv.ParseInt(val, 10, 64) if err != nil { @@ -764,15 +763,32 @@ func (s *service) NodeGetInfo( // csi-vxflexos.dellemc.com/: Log.Infof("Arrays: %+v", s.opts.arrays) topology := map[string]string{} + if zone, ok := labels["zone."+Name]; ok { + topology["zone."+Name] = zone + } + for _, array := range s.opts.arrays { isNFS, err := s.checkNFS(ctx, array.SystemID) if err != nil { return nil, err } + if isNFS { topology[Name+"/"+array.SystemID+"-nfs"] = "true" } - topology[Name+"/"+array.SystemID] = SystemTopologySystemValue + + if zone, ok := topology["zone."+Name]; ok { + if zone == string(array.AvailabilityZone.Name) { + // Add only the secret values with the correct zone. 
+ nodeID, _ := GetNodeUID(ctx, s) + Log.Infof("Zone found for node ID: %s, adding system ID: %s to node topology", nodeID, array.SystemID) + topology[Name+"/"+array.SystemID] = SystemTopologySystemValue + } + } else { + nodeID, _ := GetNodeUID(ctx, s) + Log.Infof("No zoning found for node ID: %s, adding system ID: %s", nodeID, array.SystemID) + topology[Name+"/"+array.SystemID] = SystemTopologySystemValue + } } nodeID, err := GetNodeUID(ctx, s) diff --git a/service/step_defs_test.go b/service/step_defs_test.go index e7d4d9ff..2008469f 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -4256,12 +4256,15 @@ func (f *feature) iUseConfig(filename string) error { fmt.Printf("****************************************************** s.opts.arrays %v\n", f.service.opts.arrays) f.service.systemProbeAll(context.Background()) f.adminClient = f.service.adminClients[arrayID] - f.adminClient2 = f.service.adminClients[arrayID2] if f.adminClient == nil { return fmt.Errorf("adminClient nil") } - if f.adminClient2 == nil { - return fmt.Errorf("adminClient2 nil") + + if len(f.service.opts.arrays) > 1 { + f.adminClient2 = f.service.adminClients[arrayID2] + if f.adminClient2 == nil { + return fmt.Errorf("adminClient2 nil") + } } return nil } @@ -4677,6 +4680,76 @@ func (f *feature) iCallPingNASServer(systemID string, name string) error { return nil } +func getZoneEnabledRequest() *csi.CreateVolumeRequest { + req := new(csi.CreateVolumeRequest) + params := make(map[string]string) + req.Parameters = params + capacityRange := new(csi.CapacityRange) + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 + req.CapacityRange = capacityRange + req.AccessibilityRequirements = new(csi.TopologyRequirement) + topologies := []*csi.Topology{ + { + Segments: map[string]string{ + "zone.csi-vxflexos.dellemc.com": "zoneA", + }, + }, + { + Segments: map[string]string{ + "zone.csi-vxflexos.dellemc.com": "zoneB", + }, + }, + } + req.AccessibilityRequirements.Preferred = topologies + return req +} + +func (f *feature) iCallCreateVolumeWithZones(name string) error { + ctx := new(context.Context) + if f.createVolumeRequest == nil { + req := getZoneEnabledRequest() + f.createVolumeRequest = req + } + req := f.createVolumeRequest + req.Name = name + + fmt.Println("I am in iCallCreateVolume fn.....") + + f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + if f.err != nil { + log.Printf("CreateVolume called failed: %s\n", f.err.Error()) + } + + if f.createVolumeResponse != nil { + log.Printf("vol id %s\n", f.createVolumeResponse.GetVolume().VolumeId) + } + return nil +} + +func mockGetNodeLabelsWithZone(_ context.Context, _ *service) (map[string]string, error) { + labels := map[string]string{"zone.csi-vxflexos.dellemc.com": "zoneA"} + return labels, nil +} + +func (f *feature) iCallNodeGetInfoWithZoneLabels() error { + ctx := new(context.Context) + req := new(csi.NodeGetInfoRequest) + f.service.opts.SdcGUID = "9E56672F-2F4B-4A42-BFF4-88B6846FBFDA" + GetNodeLabels = mockGetNodeLabelsWithZone + GetNodeUID = mockGetNodeUID + f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) + return nil +} + +func (f *feature) aValidNodeGetInfoIsReturnedWithNodeTopology() error { + accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() + if _, ok := accessibility.Segments["zone.csi-vxflexos.dellemc.com"]; !ok { + return fmt.Errorf("zone not found") + } + + return nil +} + func FeatureContext(s *godog.ScenarioContext) { f := &feature{} s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService) @@ -4901,6 
+4974,10 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call GetNodeUID with invalid node$`, f.iCallGetNodeUIDWithInvalidNode) s.Step(`^I call GetNodeUID with unset KubernetesClient$`, f.iCallGetNodeUIDWithUnsetKubernetesClient) + s.Step(`^I call CreateVolume "([^"]*)" with zones$`, f.iCallCreateVolumeWithZones) + s.Step(`^I call NodeGetInfo with zone labels$`, f.iCallNodeGetInfoWithZoneLabels) + s.Step(`^a valid NodeGetInfo is returned with node topology$`, f.aValidNodeGetInfoIsReturnedWithNodeTopology) + s.After(func(ctx context.Context, _ *godog.Scenario, _ error) (context.Context, error) { if f.server != nil { f.server.Close() diff --git a/test/integration/wrong_config.json b/test/integration/wrong_config.json index aff8953b..79808bc7 100644 --- a/test/integration/wrong_config.json +++ b/test/integration/wrong_config.json @@ -1,6 +1,6 @@ [ { - "username": "admin", + "username": "username", "password": "password", "systemID": "system_name", "endpoint": "https://127.0.0.1", @@ -8,7 +8,7 @@ "isDefault": true }, { - "username": "admin", + "username": "username", "password": "password", "systemID": "system_name", "endpoint": "https://127.0.0.1", From 49b41c951696109c3f12500d039f28f53a841853 Mon Sep 17 00:00:00 2001 From: Harshita Pandey <88329939+harshitap26@users.noreply.github.com> Date: Fri, 29 Nov 2024 19:54:07 +0530 Subject: [PATCH 04/15] Unit tests for Node Topologies (#358) * Adding Unit tests * fixing formatting issues --- service/features/service.feature | 14 +++++++++++++- service/step_defs_test.go | 26 +++++++++++++++++++++++++- 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/service/features/service.feature b/service/features/service.feature index 39225fd9..2d8d4301 100644 --- a/service/features/service.feature +++ b/service/features/service.feature @@ -1565,8 +1565,20 @@ Feature: VxFlex OS CSI interface | "volume1" | "multi_az" | "none" | | "volume1" | "invalid_multi_az" | "no zone topology found in accessibility requirements" | + Scenario: Call NodeGetInfo without zone label + Given a VxFlexOS service + And I use config "config" + When I call NodeGetInfo + Then a NodeGetInfo is returned without zone topology + Scenario: Call NodeGetInfo with zone label Given a VxFlexOS service And I use config "multi_az" When I call NodeGetInfo with zone labels - Then a valid NodeGetInfo is returned with node topology + Then a valid NodeGetInfo is returned with node topology + + Scenario: Call NodeGetInfo with zone label with invalid config + Given a VxFlexOS service + And I use config "invalid_multi_az" + When I call NodeGetInfo with zone labels + Then a NodeGetInfo is returned without zone system topology diff --git a/service/step_defs_test.go b/service/step_defs_test.go index 2008469f..713878d2 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -1881,6 +1881,7 @@ func (f *feature) iCallNodeGetInfo() error { req := new(csi.NodeGetInfoRequest) f.service.opts.SdcGUID = "9E56672F-2F4B-4A42-BFF4-88B6846FBFDA" GetNodeLabels = mockGetNodeLabels + GetNodeUID = mockGetNodeUID f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) return nil } @@ -4727,7 +4728,7 @@ func (f *feature) iCallCreateVolumeWithZones(name string) error { } func mockGetNodeLabelsWithZone(_ context.Context, _ *service) (map[string]string, error) { - labels := map[string]string{"zone.csi-vxflexos.dellemc.com": "zoneA"} + labels := map[string]string{"zone." 
+ Name: "zoneA"} return labels, nil } @@ -4750,6 +4751,27 @@ func (f *feature) aValidNodeGetInfoIsReturnedWithNodeTopology() error { return nil } +func (f *feature) aNodeGetInfoIsReturnedWithoutZoneTopology() error { + accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() + Log.Printf("Node Accessibility %+v", accessibility) + if _, ok := accessibility.Segments["zone."+Name]; ok { + return fmt.Errorf("zone found") + } + return nil +} + +func (f *feature) aNodeGetInfoIsReturnedWithoutZoneSystemTopology() error { + accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() + Log.Printf("Node Accessibility %+v", accessibility) + + for _, array := range f.service.opts.arrays { + if _, ok := accessibility.Segments[Name+"/"+array.SystemID]; ok { + return fmt.Errorf("zone found") + } + } + return nil +} + func FeatureContext(s *godog.ScenarioContext) { f := &feature{} s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService) @@ -4977,6 +4999,8 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call CreateVolume "([^"]*)" with zones$`, f.iCallCreateVolumeWithZones) s.Step(`^I call NodeGetInfo with zone labels$`, f.iCallNodeGetInfoWithZoneLabels) s.Step(`^a valid NodeGetInfo is returned with node topology$`, f.aValidNodeGetInfoIsReturnedWithNodeTopology) + s.Step(`^a NodeGetInfo is returned without zone topology$`, f.aNodeGetInfoIsReturnedWithoutZoneTopology) + s.Step(`^a NodeGetInfo is returned without zone system topology$`, f.aNodeGetInfoIsReturnedWithoutZoneSystemTopology) s.After(func(ctx context.Context, _ *godog.Scenario, _ error) (context.Context, error) { if f.server != nil { From fb876a21da8af24b3e627428f6b6aaf98f74af91 Mon Sep 17 00:00:00 2001 From: Trevor Dawe Date: Mon, 2 Dec 2024 16:05:26 -0400 Subject: [PATCH 05/15] Use generic zone labels (#360) --- samples/secret.yaml | 3 + service/controller.go | 7 +- service/features/array-config/multi_az | 2 + .../array-config/multi_az_custom_labels | 22 ++++++ service/features/service.feature | 12 ++-- service/node.go | 7 +- service/service.go | 27 ++++++++ service/service_unit_test.go | 68 +++++++++++++++++++ service/step_defs_test.go | 20 +++--- 9 files changed, 147 insertions(+), 21 deletions(-) create mode 100644 service/features/array-config/multi_az_custom_labels diff --git a/samples/secret.yaml b/samples/secret.yaml index 7a564952..868e0d9a 100644 --- a/samples/secret.yaml +++ b/samples/secret.yaml @@ -44,6 +44,9 @@ # # name: The name of the container orchestrator's availability zone to which the PowerFlex system # # should be mapped. # name: "zoneA" + # # labelKey: The name of the label used for the availability zone to which the PowerFlex system + # # should be mapped. + # labelKey: "topology.kubernetes.io/zone" # # protectionDomains: A list of the protection domains and their associated pools, defined in # # the PowerFlex system. # # Currently, csi-powerflex only supports one protection domain per zone. 
diff --git a/service/controller.go b/service/controller.go index 7cd02b3c..42882535 100644 --- a/service/controller.go +++ b/service/controller.go @@ -219,11 +219,12 @@ func (s *service) CreateVolume( var volumeTopology []*csi.Topology systemSegments := map[string]string{} // topology segments matching requested system for a volume - // Handle Zone topology, which happens when node is annotated with "Zone" label + // Handle Zone topology, which happens when node is annotated with a matching zone label if len(zoneTargetMap) != 0 && accessibility != nil && len(accessibility.GetPreferred()) > 0 { for _, topo := range accessibility.GetPreferred() { for topoLabel, zoneName := range topo.Segments { - if strings.HasPrefix(topoLabel, "zone."+Name) { + Log.Infof("Zoning based on label %s", s.opts.zoneLabelKey) + if strings.HasPrefix(topoLabel, s.opts.zoneLabelKey) { zoneTarget, ok := zoneTargetMap[ZoneName(zoneName)] if !ok { Log.Infof("no zone target for %s", zoneTarget) @@ -241,7 +242,7 @@ func (s *service) CreateVolume( continue } - systemSegments["zone."+Name] = zoneName + systemSegments[s.opts.zoneLabelKey] = zoneName volumeTopology = append(volumeTopology, &csi.Topology{ Segments: systemSegments, }) diff --git a/service/features/array-config/multi_az b/service/features/array-config/multi_az index 22fe8b99..49a62da8 100644 --- a/service/features/array-config/multi_az +++ b/service/features/array-config/multi_az @@ -8,6 +8,7 @@ "systemID": "14dbbf5617523654", "zone": { "name": "zoneA", + "labelKey": "zone.csi-vxflexos.dellemc.com", "protectionDomains": [ { "name": "mocksystem", @@ -27,6 +28,7 @@ "systemID": "15dbbf5617523655", "zone": { "name": "zoneB", + "labelKey": "zone.csi-vxflexos.dellemc.com", "protectionDomains": [ { "name": "mocksystem", diff --git a/service/features/array-config/multi_az_custom_labels b/service/features/array-config/multi_az_custom_labels new file mode 100644 index 00000000..b346db0c --- /dev/null +++ b/service/features/array-config/multi_az_custom_labels @@ -0,0 +1,22 @@ +[ + { + "endpoint": "http://127.0.0.1", + "username": "username", + "password": "password", + "insecure": true, + "isDefault": true, + "systemID": "14dbbf5617523654", + "zone": { + "name": "zone1", + "labelKey": "topology.k8s.io/zone", + "protectionDomains": [ + { + "name": "pd", + "pools": [ + "pool1" + ] + } + ] + } + } +] diff --git a/service/features/service.feature b/service/features/service.feature index 2d8d4301..d7c3545b 100644 --- a/service/features/service.feature +++ b/service/features/service.feature @@ -1573,12 +1573,10 @@ Feature: VxFlex OS CSI interface Scenario: Call NodeGetInfo with zone label Given a VxFlexOS service - And I use config "multi_az" + And I use config When I call NodeGetInfo with zone labels Then a valid NodeGetInfo is returned with node topology - - Scenario: Call NodeGetInfo with zone label with invalid config - Given a VxFlexOS service - And I use config "invalid_multi_az" - When I call NodeGetInfo with zone labels - Then a NodeGetInfo is returned without zone system topology + Examples: + | config | + | "multi_az" | + | "multi_az_custom_labels" | diff --git a/service/node.go b/service/node.go index 1674c204..31162df0 100644 --- a/service/node.go +++ b/service/node.go @@ -763,8 +763,9 @@ func (s *service) NodeGetInfo( // csi-vxflexos.dellemc.com/: Log.Infof("Arrays: %+v", s.opts.arrays) topology := map[string]string{} - if zone, ok := labels["zone."+Name]; ok { - topology["zone."+Name] = zone + + if zone, ok := labels[s.opts.zoneLabelKey]; ok { + 
topology[s.opts.zoneLabelKey] = zone } for _, array := range s.opts.arrays { @@ -777,7 +778,7 @@ func (s *service) NodeGetInfo( topology[Name+"/"+array.SystemID+"-nfs"] = "true" } - if zone, ok := topology["zone."+Name]; ok { + if zone, ok := topology[s.opts.zoneLabelKey]; ok { if zone == string(array.AvailabilityZone.Name) { // Add only the secret values with the correct zone. nodeID, _ := GetNodeUID(ctx, s) diff --git a/service/service.go b/service/service.go index cf023a1e..ddd79830 100644 --- a/service/service.go +++ b/service/service.go @@ -131,6 +131,7 @@ type ( // AvailabilityZone provides a mapping between cluster zones labels and storage systems type AvailabilityZone struct { Name ZoneName `json:"name"` + LabelKey string `json:"labelKey"` ProtectionDomains []ProtectionDomain `json:"protectionDomains"` } @@ -187,6 +188,7 @@ type Opts struct { IsQuotaEnabled bool // allow driver to enable quota limits for NFS volumes ExternalAccess string // used for adding extra IP/IP range to the NFS export KubeNodeName string + zoneLabelKey string } type service struct { @@ -418,6 +420,13 @@ func (s *service) BeforeServe( return err } + // if custom zoning is being used, find the common label from the array secret + opts.zoneLabelKey, err = getZoneKeyLabelFromSecret(opts.arrays) + if err != nil { + Log.Warnf("unable to get zone key from secret: %s", err.Error()) + return err + } + if err = s.ProcessMapSecretChange(); err != nil { Log.Warnf("unable to configure dynamic configMap secret change detection : %s", err.Error()) return err @@ -1882,3 +1891,21 @@ func ParseInt64FromContext(ctx context.Context, key string) (int64, error) { func lookupEnv(ctx context.Context, key string) (string, bool) { return csictx.LookupEnv(ctx, key) } + +func getZoneKeyLabelFromSecret(arrays map[string]*ArrayConnectionData) (string, error) { + zoneKeyLabel := "" + + for _, array := range arrays { + if array.AvailabilityZone != nil { + if zoneKeyLabel == "" { + // Assumes that the key parameter is not empty + zoneKeyLabel = array.AvailabilityZone.LabelKey + } else if zoneKeyLabel != array.AvailabilityZone.LabelKey { + Log.Warnf("array %s zone key %s does not match %s", array.SystemID, array.AvailabilityZone.LabelKey, zoneKeyLabel) + return "", fmt.Errorf("array %s zone key %s does not match %s", array.SystemID, array.AvailabilityZone.LabelKey, zoneKeyLabel) + } + } + } + + return zoneKeyLabel, nil +} diff --git a/service/service_unit_test.go b/service/service_unit_test.go index ff8c0b0a..a5b21f9f 100644 --- a/service/service_unit_test.go +++ b/service/service_unit_test.go @@ -532,6 +532,74 @@ func TestGetIPAddressByInterface(t *testing.T) { } } +func TestGetZoneKeyLabelFromSecret(t *testing.T) { + tests := []struct { + name string + arrays map[string]*ArrayConnectionData + expectedLabel string + expectedErr error + }{ + { + name: "Empty array connection data", + arrays: map[string]*ArrayConnectionData{}, + expectedLabel: "", + expectedErr: nil, + }, + { + name: "Array connection data with same zone label keys", + arrays: map[string]*ArrayConnectionData{ + "array1": { + AvailabilityZone: &AvailabilityZone{ + Name: "zone1", + LabelKey: "custom-zone.io/area", + }, + }, + "array2": { + AvailabilityZone: &AvailabilityZone{ + Name: "zone2", + LabelKey: "custom-zone.io/area", + }, + }, + }, + expectedLabel: "custom-zone.io/area", + expectedErr: nil, + }, + { + name: "Array connection data with different label keys", + arrays: map[string]*ArrayConnectionData{ + "array1": { + SystemID: "system-1", + AvailabilityZone: 
&AvailabilityZone{ + Name: "zone1", + LabelKey: "custom-zone-1.io/area", + }, + }, + "array2": { + SystemID: "system-2", + AvailabilityZone: &AvailabilityZone{ + Name: "zone2", + LabelKey: "custom-zone-2.io/area", + }, + }, + }, + expectedLabel: "", + expectedErr: fmt.Errorf("array system-2 zone key custom-zone-2.io/area does not match custom-zone-1.io/area"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + label, err := getZoneKeyLabelFromSecret(tt.arrays) + if tt.expectedErr == nil { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + } + assert.Equal(t, label, tt.expectedLabel) + }) + } +} + func TestFindNetworkInterfaceIPs(t *testing.T) { tests := []struct { name string diff --git a/service/step_defs_test.go b/service/step_defs_test.go index 713878d2..ced2869e 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -4254,6 +4254,10 @@ func (f *feature) iUseConfig(filename string) error { } } + f.service.opts.zoneLabelKey, err = getZoneKeyLabelFromSecret(f.service.opts.arrays) + if err != nil { + return fmt.Errorf("get zone key label from secret: %s", err.Error()) + } fmt.Printf("****************************************************** s.opts.arrays %v\n", f.service.opts.arrays) f.service.systemProbeAll(context.Background()) f.adminClient = f.service.adminClients[arrayID] @@ -4681,7 +4685,7 @@ func (f *feature) iCallPingNASServer(systemID string, name string) error { return nil } -func getZoneEnabledRequest() *csi.CreateVolumeRequest { +func getZoneEnabledRequest(zoneLabelName string) *csi.CreateVolumeRequest { req := new(csi.CreateVolumeRequest) params := make(map[string]string) req.Parameters = params @@ -4692,12 +4696,12 @@ func getZoneEnabledRequest() *csi.CreateVolumeRequest { topologies := []*csi.Topology{ { Segments: map[string]string{ - "zone.csi-vxflexos.dellemc.com": "zoneA", + zoneLabelName: "zoneA", }, }, { Segments: map[string]string{ - "zone.csi-vxflexos.dellemc.com": "zoneB", + zoneLabelName: "zoneB", }, }, } @@ -4708,7 +4712,7 @@ func getZoneEnabledRequest() *csi.CreateVolumeRequest { func (f *feature) iCallCreateVolumeWithZones(name string) error { ctx := new(context.Context) if f.createVolumeRequest == nil { - req := getZoneEnabledRequest() + req := getZoneEnabledRequest(f.service.opts.zoneLabelKey) f.createVolumeRequest = req } req := f.createVolumeRequest @@ -4727,8 +4731,8 @@ func (f *feature) iCallCreateVolumeWithZones(name string) error { return nil } -func mockGetNodeLabelsWithZone(_ context.Context, _ *service) (map[string]string, error) { - labels := map[string]string{"zone." 
+ Name: "zoneA"} +func mockGetNodeLabelsWithZone(_ context.Context, s *service) (map[string]string, error) { + labels := map[string]string{s.opts.zoneLabelKey: "zoneA"} return labels, nil } @@ -4744,7 +4748,7 @@ func (f *feature) iCallNodeGetInfoWithZoneLabels() error { func (f *feature) aValidNodeGetInfoIsReturnedWithNodeTopology() error { accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() - if _, ok := accessibility.Segments["zone.csi-vxflexos.dellemc.com"]; !ok { + if _, ok := accessibility.Segments[f.service.opts.zoneLabelKey]; !ok { return fmt.Errorf("zone not found") } @@ -4754,7 +4758,7 @@ func (f *feature) aValidNodeGetInfoIsReturnedWithNodeTopology() error { func (f *feature) aNodeGetInfoIsReturnedWithoutZoneTopology() error { accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() Log.Printf("Node Accessibility %+v", accessibility) - if _, ok := accessibility.Segments["zone."+Name]; ok { + if _, ok := accessibility.Segments[f.service.opts.zoneLabelKey]; ok { return fmt.Errorf("zone found") } return nil From d9c5c4d0b580420b72930baf8409546e362679c9 Mon Sep 17 00:00:00 2001 From: Fernando Alfaro Campos Date: Wed, 4 Dec 2024 09:38:20 -0500 Subject: [PATCH 06/15] Add zone volume creation integration tests (#359) * Add zone volume creation integration tests * Fix golangci-lint issue * Create e2e zone tests * Adjust indentation of template yaml files * Adjust indentation of template yaml files * Update e2e file names * Format e2e files for golangci-lint * Update e2e test scenarios * Update pod templates * Address PR comments * Add README to e2e tests * Update sts templates --- Makefile | 2 +- test/Makefile | 23 ++ test/e2e/README.md | 24 ++ test/e2e/e2e.go | 367 ++++++++++++++++++ test/e2e/e2e_test.go | 40 ++ test/e2e/features/e2e.feature | 31 ++ test/e2e/templates/zone-wait/sts.yaml | 43 ++ .../features/array-config/multi-az | 23 ++ test/integration/features/integration.feature | 23 ++ test/integration/integration_test.go | 93 +++-- test/integration/run.sh | 5 +- test/integration/step_defs_test.go | 71 ++++ 12 files changed, 703 insertions(+), 42 deletions(-) create mode 100644 test/Makefile create mode 100644 test/e2e/README.md create mode 100644 test/e2e/e2e.go create mode 100644 test/e2e/e2e_test.go create mode 100644 test/e2e/features/e2e.feature create mode 100644 test/e2e/templates/zone-wait/sts.yaml create mode 100644 test/integration/features/array-config/multi-az diff --git a/Makefile b/Makefile index 2efe1809..5f30ccfd 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ unit-test: # Linux only; populate env.sh with the hardware parameters integration-test: - ( cd test/integration; sh run.sh ) + ( cd test/integration; sh run.sh TestIntegration ) check: @scripts/check.sh ./provider/ ./service/ diff --git a/test/Makefile b/test/Makefile new file mode 100644 index 00000000..f9ddff70 --- /dev/null +++ b/test/Makefile @@ -0,0 +1,23 @@ +.PHONY: all +all: help + +help: + @echo + @echo "The following targets are commonly used:" + @echo + @echo "integration - Run integration tests" + @echo "zone-integration - Run zone-integration tests" + @echo "zone-e2e - Run zone-e2e tests" + @echo + +.PHONY: integration +integration: + cd integration; ./run.sh TestIntegration + +.PHONY: zone-integration +zone-integration: + cd integration; ./run.sh TestZoneIntegration + +.PHONY: zone-e2e +zone-e2e: + go test -v -count=1 -timeout 1h -run '^TestZoneVolumes$$' ./e2e diff --git a/test/e2e/README.md b/test/e2e/README.md new file mode 100644 index 00000000..77416699 --- /dev/null 
+++ b/test/e2e/README.md
@@ -0,0 +1,24 @@
+# End-to-End Tests Overview
+
+## Prerequisite
+
+The e2e tests assume the driver is deployed and running with a valid secret. The other assumptions are:
+
+1. The namespace `vxflexos-test` exists, since all k8s objects will be created and checked for there.
+2. For `multi-available-zone` tests, the storage class `vxflexos-az-wait` exists (see [storage class](../../samples/storageclass/storageclass-az.yaml) for an example).
+3. For `multi-available-zone` tests, the nodes should be configured correctly, as the test will pull out the zoneLabelKey and compare it across the nodes and the storage class.
+
+## Using the Makefile
+
+In the root `test` directory, there is a `Makefile` with different targets for the integration test and the e2e test. The target for the e2e tests is:
+
+- `make zone-e2e` -> Runs the end-to-end tests for `multi-available-zone`.
+
+## Overview of Targets/Tests
+
+### Multi-Available Zone
+
+The following tests are implemented and run during the `zone-e2e` test.
+
+1. Creates a stateful set of 7 replicas and ensures that everything is up and ready with the zone configuration.
+2. Cordons a node (marks it as unschedulable), creates 7 volumes/pods, and ensures that none gets scheduled on the cordoned node.
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
new file mode 100644
index 00000000..26611d17
--- /dev/null
+++ b/test/e2e/e2e.go
@@ -0,0 +1,367 @@
+// Copyright © 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package e2e
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"log"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/cucumber/godog"
+	"github.com/dell/csi-vxflexos/v2/service"
+	v1 "k8s.io/api/apps/v1"
+	v1Core "k8s.io/api/core/v1"
+	v1Storage "k8s.io/api/storage/v1"
+	"sigs.k8s.io/yaml"
+)
+
+const (
+	driverNamespace = "vxflexos"
+	testNamespace   = "vxflexos-test"
+	storageClass    = "vxflexos-az-wait"
+)
+
+type feature struct {
+	errs            []error
+	zoneNodeMapping map[string]string
+	zoneKey         string
+	supportedZones  []string
+	cordonedNode    string
+}
+
+func (f *feature) aVxFlexOSService() error {
+	f.errs = make([]error, 0)
+	f.zoneNodeMapping = make(map[string]string)
+	f.supportedZones = make([]string, 0)
+	f.cordonedNode = ""
+	f.zoneKey = ""
+	return nil
+}
+
+func (f *feature) isEverythingWorking() error {
+	log.Println("[isEverythingWorking] Checking if everything is working...")
+	checkNamespace := "kubectl get ns -A | grep -e " + driverNamespace + " -e " + testNamespace
+	result, err := execLocalCommand(checkNamespace)
+	if err != nil {
+		return err
+	}
+
+	if !strings.Contains(string(result), driverNamespace) || !strings.Contains(string(result), testNamespace) {
+		return fmt.Errorf("namespace %s or %s not found", driverNamespace, testNamespace)
+	}
+
+	checkDeployment := "kubectl get deployment -n " + driverNamespace + " vxflexos-controller -o json"
+	result, err = execLocalCommand(checkDeployment)
+	if err != nil {
+		return err
+	}
+
+	deploymentInfo := v1.Deployment{}
+	err = json.Unmarshal(result, &deploymentInfo)
+	if err != nil {
+		return err
+	}
+
+	if deploymentInfo.Status.Replicas != deploymentInfo.Status.ReadyReplicas {
+		return fmt.Errorf("deployment not ready, check deployment status and then try again")
+	}
+
+	return nil
+}
+
+func (f *feature) verifyZoneInformation(secret, namespace string) error {
+	arrays, err := f.getZoneFromSecret(secret, namespace)
+	if err != nil {
+		return err
+	}
+
+	for _, array := range arrays {
+		if array.AvailabilityZone == nil {
+			continue
+		}
+
+		// Find the first zone label and assume all others are the same.
+		f.zoneKey = array.AvailabilityZone.LabelKey
+		break
+	}
+
+	if f.zoneKey == "" {
+		return fmt.Errorf("no labelKey found in secret %s", secret)
+	}
+
+	getNodeLabels := []string{"kubectl", "get", "nodes", "-A", "-o", "jsonpath='{.items}'"}
+	justString := strings.Join(getNodeLabels, " ")
+
+	result, err := execLocalCommand(justString)
+	if err != nil {
+		return err
+	}
+
+	nodes := []v1Core.Node{}
+	err = json.Unmarshal(result, &nodes)
+	if err != nil {
+		return err
+	}
+
+	for _, node := range nodes {
+		if strings.Contains(node.ObjectMeta.Name, "master") {
+			continue
+		}
+
+		if val, ok := node.ObjectMeta.Labels[f.zoneKey]; ok {
+			f.zoneNodeMapping[node.ObjectMeta.Name] = val
+		}
+	}
+
+	if len(f.zoneNodeMapping) == 0 {
+		return fmt.Errorf("no nodes found for zone: %s", f.zoneKey)
+	}
+
+	getStorageClassCmd := "kubectl get sc " + storageClass
+	_, err = execLocalCommand(getStorageClassCmd)
+	if err != nil {
+		return fmt.Errorf("storage class %s not found", storageClass)
+	}
+
+	scInfo := v1Storage.StorageClass{}
+	getStorageClassInfo := "kubectl get sc " + storageClass + " -o json"
+	result, err = execLocalCommand(getStorageClassInfo)
+	if err != nil {
+		return fmt.Errorf("storage class %s not found", storageClass)
+	}
+	err = json.Unmarshal(result, &scInfo)
+	if err != nil {
+		return err
+	}
+
+	if scInfo.AllowedTopologies == nil {
+		return fmt.Errorf("no topologies found for storage class %s", storageClass)
+	}
+
+	if scInfo.AllowedTopologies[0].MatchLabelExpressions[0].Key != f.zoneKey {
+		return fmt.Errorf("storage class %s does not have the proper zone label %s", storageClass, f.zoneKey)
+	}
+
+	// Add supported zones from the test storage class.
+	f.supportedZones = scInfo.AllowedTopologies[0].MatchLabelExpressions[0].Values
+
+	return nil
+}
+
+func (f *feature) getZoneFromSecret(secretName, namespace string) ([]service.ArrayConnectionData, error) {
+	getSecretInformation := []string{"kubectl", "get", "secrets", "-n", namespace, secretName, "-o", "jsonpath='{.data.config}'"}
+	justString := strings.Join(getSecretInformation, " ")
+
+	result, err := execLocalCommand(justString)
+	if err != nil {
+		return nil, err
+	}
+
+	dec, err := base64.StdEncoding.DecodeString(string(result))
+	if err != nil {
+		return nil, err
+	}
+
+	arrayConnection := make([]service.ArrayConnectionData, 0)
+	err = yaml.Unmarshal(dec, &arrayConnection)
+	if err != nil {
+		return nil, err
+	}
+
+	return arrayConnection, nil
+}
+
+func (f *feature) createZoneVolumes(fileLocation string) error {
+	createStsCmd := "kubectl apply -f templates/" + fileLocation + "/sts.yaml"
+	_, err := execLocalCommand(createStsCmd)
+	if err != nil {
+		return err
+	}
+
+	time.Sleep(10 * time.Second)
+
+	log.Println("[createZoneVolumes] Created volumes and pods...")
+	return nil
+}
+
+func (f *feature) deleteZoneVolumes(fileLocation string) error {
+	deleteStsCmd := "kubectl delete -f templates/" + fileLocation + "/sts.yaml"
+	_, err := execLocalCommand(deleteStsCmd)
+	if err != nil {
+		return err
+	}
+
+	time.Sleep(10 * time.Second)
+
+	log.Println("[deleteZoneVolumes] Deleted volumes and pods...")
+	return nil
+}
+
+func (f *feature) checkStatefulSetStatus() error {
+	log.Println("[checkStatefulSetStatus] checking statefulset status")
+
+	err := f.isStatefulSetReady()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (f *feature) isStatefulSetReady() error {
+	ready := false
+	attempts := 0
+
+	for attempts < 15 {
+		getStsCmd := "kubectl get sts -n " + testNamespace + " vxflextest-az -o json"
+		result, err := execLocalCommand(getStsCmd)
execLocalCommand(getStsCmd) + if err != nil { + return err + } + + sts := v1.StatefulSet{} + err = json.Unmarshal(result, &sts) + if err != nil { + return err + } + + // Everything should be ready. + if *sts.Spec.Replicas == sts.Status.ReadyReplicas { + ready = true + break + } + + attempts++ + time.Sleep(10 * time.Second) + } + + if !ready { + return fmt.Errorf("statefulset not ready, check statefulset status and then try again") + } + + return nil +} + +func (f *feature) getStatefulSetPods() ([]v1Core.Pod, error) { + getZonePods := "kubectl get pods -n " + testNamespace + " -l app=vxflextest-az -o jsonpath='{.items}'" + result, err := execLocalCommand(getZonePods) + if err != nil { + return nil, err + } + + pods := []v1Core.Pod{} + err = json.Unmarshal(result, &pods) + if err != nil { + return nil, err + } + + log.Println("[getStatefulSetPods] Pods found: ", len(pods)) + + return pods, nil +} + +func (f *feature) cordonNode() error { + nodeToCordon := "" + + // Get the first node in the zone + for key := range f.zoneNodeMapping { + log.Printf("[cordonNode] Cordoning node: %s\n", key) + nodeToCordon = key + break + } + + if nodeToCordon == "" { + return fmt.Errorf("no node to cordon found") + } + + cordonNodeCommand := "kubectl cordon " + nodeToCordon + _, err := execLocalCommand(cordonNodeCommand) + if err != nil { + return err + } + + // Once cordoned, the node carries the NoSchedule taint + checkNodeStatus := "kubectl get node " + nodeToCordon + " -o json | grep NoSchedule" + result, err := execLocalCommand(checkNodeStatus) + if err != nil { + return err + } + + if !strings.Contains(string(result), "NoSchedule") { + return fmt.Errorf("node %s not cordoned", nodeToCordon) + } + + log.Println("[cordonNode] Cordoned node correctly") + f.cordonedNode = nodeToCordon + + return nil +} + +func (f *feature) checkPodsForCordonRun() error { + log.Println("[checkPodsForCordonRun] checking pods status") + + pods, err := f.getStatefulSetPods() + if err != nil { + return fmt.Errorf("pods not ready, check pods status and then try again") + } + + for _, pod := range pods { + if pod.Spec.NodeName == f.cordonedNode { + return fmt.Errorf("pod %s scheduled incorrectly", pod.ObjectMeta.Name) + } + } + + log.Println("[checkPodsForCordonRun] Pods scheduled correctly, resetting node...") + + // Reset node since scheduled correctly + uncordonNodeCommand := "kubectl uncordon " + f.cordonedNode + _, err = execLocalCommand(uncordonNodeCommand) + if err != nil { + return err + } + + return nil +} + +func execLocalCommand(command string) ([]byte, error) { + var buf bytes.Buffer + cmd := exec.Command("bash", "-c", command) + cmd.Stdout = &buf + cmd.Stderr = &buf + + err := cmd.Run() + if err != nil { + return buf.Bytes(), err + } + + return buf.Bytes(), nil +} + +func InitializeScenario(s *godog.ScenarioContext) { + f := &feature{} + + s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService) + s.Step(`^verify driver is configured and running correctly$`, f.isEverythingWorking) + s.Step(`^verify zone information from secret "([^"]*)" in namespace "([^"]*)"$`, f.verifyZoneInformation) + s.Step(`^create zone volume and pod in "([^"]*)"$`, f.createZoneVolumes) + s.Step(`^delete zone volume and pod in "([^"]*)"$`, f.deleteZoneVolumes) + s.Step(`^check the statefulset for zones$`, f.checkStatfulSetStatus) + s.Step(`^cordon one node$`, f.cordonNode) + s.Step(`^ensure pods aren't scheduled incorrectly and still running$`, f.checkPodsForCordonRun) +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go new file mode 100644 index
00000000..6d0a1efa --- /dev/null +++ b/test/e2e/e2e_test.go @@ -0,0 +1,40 @@ +// Copyright © 2024 Dell Inc. or its subsidiaries. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package e2e + +import ( + "testing" + + "github.com/cucumber/godog" +) + +func TestZoneVolumes(t *testing.T) { + godogOptions := godog.Options{ + Format: "pretty,junit:zone-volumes-test-report.xml", + Paths: []string{"features"}, + Tags: "zone", + TestingT: t, + StopOnFailure: true, + } + + status := godog.TestSuite{ + Name: "zone-volumes", + ScenarioInitializer: InitializeScenario, + Options: &godogOptions, + }.Run() + + if status != 0 { + t.Fatalf("error: there were failed zone volumes tests. status: %d", status) + } +} diff --git a/test/e2e/features/e2e.feature b/test/e2e/features/e2e.feature new file mode 100644 index 00000000..aa73d824 --- /dev/null +++ b/test/e2e/features/e2e.feature @@ -0,0 +1,31 @@ +Feature: VxFlex OS CSI interface + As a consumer of the CSI interface + I want to run a system test + So that I know the service functions correctly. + + @zone + Scenario: Create zone volume through k8s + Given a VxFlexOS service + And verify driver is configured and running correctly + And verify zone information from secret <secret> in namespace <namespace> + Then create zone volume and pod in <location> + And check the statefulset for zones + Then delete zone volume and pod in <location> + Examples: + | secret | namespace | location | + | "vxflexos-config" | "vxflexos" | "zone-wait" | + + @zone + Scenario: Cordon node and create zone volume through k8s + Given a VxFlexOS service + And verify driver is configured and running correctly + And verify zone information from secret <secret> in namespace <namespace> + Then cordon one node + Then create zone volume and pod in <location> + And check the statefulset for zones + And ensure pods aren't scheduled incorrectly and still running + Then delete zone volume and pod in <location> + Examples: + | secret | namespace | location | + | "vxflexos-config" | "vxflexos" | "zone-wait" | + diff --git a/test/e2e/templates/zone-wait/sts.yaml b/test/e2e/templates/zone-wait/sts.yaml new file mode 100644 index 00000000..9419e2fa --- /dev/null +++ b/test/e2e/templates/zone-wait/sts.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vxflextest-az + namespace: vxflexos-test +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: vxflextest-az + namespace: vxflexos-test +spec: + replicas: 7 + selector: + matchLabels: + app: vxflextest-az + serviceName: test-az + template: + metadata: + labels: + app: vxflextest-az + spec: + serviceAccount: vxflextest-az + containers: + - name: test-ctr + image: quay.io/quay/busybox:latest + command: ["/bin/sleep", "3600"] + volumeMounts: + - mountPath: "/data0" + name: multi-az-pvc + volumeClaimTemplates: + - metadata: + name: multi-az-pvc + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: "vxflexos-az-wait" + resources: + requests: + storage: 8Gi + persistentVolumeClaimRetentionPolicy: + whenDeleted: Delete + whenScaled: Delete diff --git
a/test/integration/features/array-config/multi-az b/test/integration/features/array-config/multi-az new file mode 100644 index 00000000..effd06fe --- /dev/null +++ b/test/integration/features/array-config/multi-az @@ -0,0 +1,23 @@ +[ + { + "endpoint": "https://127.0.0.1", + "username": "username", + "password": "password", + "insecure": true, + "isDefault": true, + "systemID": "0000000000000001", + "mdm": "127.0.0.2,127.0.0.3", + "zone": { + "name": "myZone", + "labelKey": "zone.csi-vxflexos.dellemc.com", + "protectionDomains": [ + { + "name": "myDomain", + "pools": [ + "myPool" + ] + } + ] + } + } +] diff --git a/test/integration/features/integration.feature b/test/integration/features/integration.feature index 0a8d06dd..4e1f7fcf 100644 --- a/test/integration/features/integration.feature +++ b/test/integration/features/integration.feature @@ -990,3 +990,26 @@ Scenario: Custom file system format options (mkfsFormatOption) | "mount" | "-T largefile4" |"single-node-multi-writer" | "ext4" | "none" | | "mount" | ":-L MyVolume" | "single-writer" | "xfs" | "error performing private mount" | | "mount" | "abc" | "single-node-single-writer" | "ext4" | "error performing private mount" | + +@zone-integration +Scenario: Create publish, unpublish, and delete zone volume + Given a VxFlexOS service + And I create a zone volume request "zone-integration-vol" + When I call CreateVolume + And there are no errors + And when I call PublishVolume "SDC_GUID" + And there are no errors + And when I call UnpublishVolume "SDC_GUID" + And there are no errors + And when I call DeleteVolume + Then there are no errors + +@zone-integration +Scenario: Create zone volume with invalid zone information + Given a VxFlexOS service + And I create an invalid zone volume request + When I call CreateVolume + Then the error message should contain <errormsg> + Examples: + | errormsg | + | "no zone topology found in accessibility requirements" | diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 07a84836..6471e6b5 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -17,19 +17,16 @@ import ( "bufio" "context" "fmt" - "net" "os" "strings" "testing" "time" - "github.com/akutz/memconn" csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/cucumber/godog" "github.com/dell/csi-vxflexos/v2/provider" "github.com/dell/csi-vxflexos/v2/service" "github.com/dell/gocsi/utils" - "github.com/stretchr/testify/assert" "google.golang.org/grpc" ) @@ -72,11 +69,11 @@ func init() { } } -func TestMain(m *testing.M) { +func TestIntegration(t *testing.T) { var stop func() ctx := context.Background() fmt.Printf("calling startServer") - grpcClient, stop = startServer(ctx) + grpcClient, stop = startServer(ctx, "") fmt.Printf("back from startServer") time.Sleep(5 * time.Second) @@ -114,11 +111,54 @@ func TestMain(m *testing.M) { Options: &opts, }.Run() - if st := m.Run(); st > exitVal { - exitVal = st + stop() + if exitVal != 0 { + t.Fatalf("[TestIntegration] godog exited with %d", exitVal) + } +} + +func TestZoneIntegration(t *testing.T) { + var stop func() + ctx := context.Background() + fmt.Printf("calling startServer") + grpcClient, stop = startServer(ctx, "features/array-config/multi-az") + fmt.Printf("back from startServer") + time.Sleep(5 * time.Second) + + fmt.Printf("Checking %s\n", datadir) + err := os.Mkdir(datadir, 0o777) + if err != nil && !os.IsExist(err) { + fmt.Printf("%s: %s\n", datadir, err) }
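+ // Same scratch paths that TestIntegration prepares above; the publish/unpublish test steps expect this directory and file to exist.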
+ + fmt.Printf("Checking %s\n", datafile) + file, err := os.Create(datafile) + if err != nil && !os.IsExist(err) { + fmt.Printf("%s %s\n", datafile, err) + } + + if file != nil { + file.Close() + } + + godogOptions := godog.Options{ + Format: "pretty,junit:zone-volumes-test-report.xml", + Paths: []string{"features"}, + Tags: "zone-integration", + TestingT: t, + StopOnFailure: true, + } + + exitVal := godog.TestSuite{ + Name: "zone-integration", + ScenarioInitializer: FeatureContext, + Options: &godogOptions, + }.Run() + stop() - os.Exit(exitVal) + if exitVal != 0 { + t.Fatalf("[TestZoneIntegration] godog exited with %d", exitVal) + } } func TestIdentityGetPluginInfo(t *testing.T) { @@ -134,9 +174,12 @@ func TestIdentityGetPluginInfo(t *testing.T) { } } -func startServer(ctx context.Context) (*grpc.ClientConn, func()) { +func startServer(ctx context.Context, cFile string) (*grpc.ClientConn, func()) { + if cFile == "" { + cFile = configFile + } // Create a new SP instance and serve it with a piped connection. - service.ArrayConfigFile = configFile + service.ArrayConfigFile = cFile sp := provider.New() lis, err := utils.GetCSIEndpointListener() if err != nil { @@ -173,33 +216,3 @@ func startServer(ctx context.Context) (*grpc.ClientConn, func()) { sp.GracefulStop(ctx) } } - -func startServerX(ctx context.Context, t *testing.T) (*grpc.ClientConn, func()) { - // Create a new SP instance and serve it with a piped connection. - sp := provider.New() - lis, err := memconn.Listen("memu", "csi-test") - assert.NoError(t, err) - go func() { - if err := sp.Serve(ctx, lis); err != nil { - assert.EqualError(t, err, "http: Server closed") - } - }() - - clientOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { - return memconn.Dial("memu", "csi-test") - }), - } - - // Create a client for the piped connection. - client, err := grpc.DialContext(ctx, "unix:./unix_sock", clientOpts...) - if err != nil { - fmt.Printf("DialContext error: %s\n", err.Error()) - } - - return client, func() { - client.Close() - sp.GracefulStop(ctx) - } -} diff --git a/test/integration/run.sh b/test/integration/run.sh index 754504c7..be47ac17 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -23,7 +23,10 @@ if [ $rc -ne 0 ]; then echo "failed http unauthorized test"; exit $rc; fi rm -f unix_sock . 
../../env.sh echo $SDC_GUID -GOOS=linux CGO_ENABLED=0 GO111MODULE=on go test -v -coverprofile=c.linux.out -timeout 60m -coverpkg=github.com/dell/csi-vxflexos/service *test.go & + +testRun=$1 + +GOOS=linux CGO_ENABLED=0 GO111MODULE=on go test -v -coverprofile=c.linux.out -timeout 60m -coverpkg=github.com/dell/csi-vxflexos/service -run "^$testRun\$\$" & if [ -f ./csi-sanity ] ; then sleep 5 ./csi-sanity --csi.endpoint=./unix_sock --csi.testvolumeparameters=./pool.yml --csi.testvolumesize 8589934592 diff --git a/test/integration/step_defs_test.go b/test/integration/step_defs_test.go index cc900ff5..221d7d9b 100644 --- a/test/integration/step_defs_test.go +++ b/test/integration/step_defs_test.go @@ -2734,6 +2734,75 @@ func (f *feature) checkNFS(_ context.Context, systemID string) (bool, error) { return false, nil } +func (f *feature) iCreateZoneRequest(name string) error { + req := f.createGenericZoneRequest(name) + req.AccessibilityRequirements = new(csi.TopologyRequirement) + req.AccessibilityRequirements.Preferred = []*csi.Topology{ + { + Segments: map[string]string{ + "zone.csi-vxflexos.dellemc.com": "zoneA", + }, + }, + { + Segments: map[string]string{ + "zone.csi-vxflexos.dellemc.com": "zoneB", + }, + }, + } + + f.createVolumeRequest = req + + return nil +} + +func (f *feature) iCreateInvalidZoneRequest() error { + req := f.createGenericZoneRequest("invalid-zone-volume") + req.AccessibilityRequirements = new(csi.TopologyRequirement) + topologies := []*csi.Topology{ + { + Segments: map[string]string{ + "zone.csi-vxflexos.dellemc.com": "invalidZoneInfo", + }, + }, + } + req.AccessibilityRequirements.Preferred = topologies + f.createVolumeRequest = req + + return nil +} + +func (f *feature) createGenericZoneRequest(name string) *csi.CreateVolumeRequest { + req := new(csi.CreateVolumeRequest) + storagePool := os.Getenv("STORAGE_POOL") + params := make(map[string]string) + params["storagepool"] = storagePool + params["thickprovisioning"] = "false" + if len(f.anotherSystemID) > 0 { + params["systemID"] = f.anotherSystemID + } + req.Parameters = params + makeAUniqueName(&name) + req.Name = name + capacityRange := new(csi.CapacityRange) + capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024 + req.CapacityRange = capacityRange + + capability := new(csi.VolumeCapability) + block := new(csi.VolumeCapability_BlockVolume) + blockType := new(csi.VolumeCapability_Block) + blockType.Block = block + capability.AccessType = blockType + accessMode := new(csi.VolumeCapability_AccessMode) + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + capability.AccessMode = accessMode + f.capability = capability + capabilities := make([]*csi.VolumeCapability, 0) + capabilities = append(capabilities, capability) + req.VolumeCapabilities = capabilities + + return req +} + func FeatureContext(s *godog.ScenarioContext) { f := &feature{} s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService) @@ -2812,4 +2881,6 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call ListFileSystemSnapshot$`, f.ICallListFileSystemSnapshot) s.Step(`^I call CreateSnapshotForFS$`, f.iCallCreateSnapshotForFS) s.Step(`^I call DeleteSnapshotForFS$`, f.iCallDeleteSnapshotForFS) + s.Step(`^I create a zone volume request "([^"]*)"$`, f.iCreateZoneRequest) + s.Step(`^I create an invalid zone volume request$`, f.iCreateInvalidZoneRequest) } From 42f6add454bee4da2a714a2b62d13d45ea526a58 Mon Sep 17 00:00:00 2001 From: Harshita Pandey Date: Thu, 5 Dec 2024 22:30:42 +0530 Subject: [PATCH 07/15] Added Integration 
tests for NodeGetInfo --- env.sh | 4 + test/integration/features/integration.feature | 13 ++ test/integration/integration_test.go | 21 +-- test/integration/step_defs_test.go | 152 +++++++++++++++--- 4 files changed, 161 insertions(+), 29 deletions(-) diff --git a/env.sh b/env.sh index 8b102d36..f4d85ddc 100644 --- a/env.sh +++ b/env.sh @@ -24,10 +24,14 @@ export NFS_STORAGE_POOL="" export SDC_GUID=$(/bin/emc/scaleio/drv_cfg --query_guid) # Alternate GUID is for another system for testing expose volume to multiple hosts export ALT_GUID= +export X_CSI_POWERFLEX_KUBE_NODE_NAME="node1" # Interface variables export NODE_INTERFACES="nodeName:interfaceName" +# Node Label variables +export ZONE_LABEL_KEY="" + #Debug variables for goscaleio library export GOSCALEIO_SHOWHTTP="true" diff --git a/test/integration/features/integration.feature b/test/integration/features/integration.feature index 4e1f7fcf..c03c51f7 100644 --- a/test/integration/features/integration.feature +++ b/test/integration/features/integration.feature @@ -1013,3 +1013,16 @@ Scenario: Create zone volume with invalid zone information Examples: | errormsg | | "no zone topology found in accessibility requirements" | + +@zone-integration +Scenario: call NodeGetInfo + Given a VxFlexOS service + And I call NodeGetInfo with "" + Then a NodeGetInfo is returned with zone topology + And a NodeGetInfo is returned with system topology + +@zone-integration +Scenario: call NodeGetInfo + Given a VxFlexOS service + And I call NodeGetInfo with "invalid_zone_key" + Then a NodeGetInfo is returned without zone topology "invalid_zone_key" \ No newline at end of file diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 6471e6b5..2a9d8e2b 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -31,21 +31,22 @@ import ( ) const ( - datafile = "/tmp/datafile" - datadir = "/tmp/datadir" - configFile = "../../config.json" + datafile = "/tmp/datafile" + datadir = "/tmp/datadir" + configFile = "../../config.json" + zoneConfigFile = "features/array-config/multi-az" ) var grpcClient *grpc.ClientConn -func init() { +func readConfigFile(configurationFile string) { /* load array config and give proper errors if not ok*/ - if _, err := os.Stat(configFile); err == nil { - if _, err := os.ReadFile(configFile); err != nil { - err = fmt.Errorf("DEBUG integration pre requisites missing %s with multi array configuration file ", configFile) + if _, err := os.Stat(configurationFile); err == nil { + if _, err := os.ReadFile(configurationFile); err != nil { + err = fmt.Errorf("DEBUG integration pre requisites missing %s with multi array configuration file ", configurationFile) panic(err) } - f, err := os.Open(configFile) + f, err := os.Open(configurationFile) r := bufio.NewReader(f) mdms := make([]string, 0) line, isPrefix, err := r.ReadLine() @@ -70,6 +71,7 @@ func init() { } func TestIntegration(t *testing.T) { + readConfigFile(configFile) var stop func() ctx := context.Background() fmt.Printf("calling startServer") @@ -118,10 +120,11 @@ func TestIntegration(t *testing.T) { } func TestZoneIntegration(t *testing.T) { + readConfigFile(zoneConfigFile) var stop func() ctx := context.Background() fmt.Printf("calling startServer") - grpcClient, stop = startServer(ctx, "features/array-config/multi-az") + grpcClient, stop = startServer(ctx, zoneConfigFile) fmt.Printf("back from startServer") time.Sleep(5 * time.Second) diff --git a/test/integration/step_defs_test.go 
b/test/integration/step_defs_test.go index 221d7d9b..c5fa13c4 100644 --- a/test/integration/step_defs_test.go +++ b/test/integration/step_defs_test.go @@ -79,6 +79,8 @@ type feature struct { createVolumeRequest *csi.CreateVolumeRequest publishVolumeRequest *csi.ControllerPublishVolumeRequest nodePublishVolumeRequest *csi.NodePublishVolumeRequest + nodeGetInfoRequest *csi.NodeGetInfoRequest + nodeGetInfoResponse *csi.NodeGetInfoResponse listVolumesResponse *csi.ListVolumesResponse listSnapshotsResponse *csi.ListSnapshotsResponse capability *csi.VolumeCapability @@ -103,7 +105,7 @@ func (f *feature) getGoscaleioClient() (client *goscaleio.Client, err error) { if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return nil, errors.New("Get multi array config failed " + err.Error()) } @@ -137,19 +139,19 @@ func (f *feature) getGoscaleioClient() (client *goscaleio.Client, err error) { // there is no way to call service.go methods from here // hence copy same method over there , this is used to get all arrays and pick different // systemID to test with see method iSetAnotherSystemID -func (f *feature) getArrayConfig() (map[string]*ArrayConnectionData, error) { +func (f *feature) getArrayConfig(configurationFile string) (map[string]*ArrayConnectionData, error) { arrays := make(map[string]*ArrayConnectionData) - _, err := os.Stat(configFile) + _, err := os.Stat(configurationFile) if err != nil { if os.IsNotExist(err) { - return nil, fmt.Errorf("File %s does not exist", configFile) + return nil, fmt.Errorf("File %s does not exist", configurationFile) } } - config, err := os.ReadFile(filepath.Clean(configFile)) + config, err := os.ReadFile(filepath.Clean(configurationFile)) if err != nil { - return nil, fmt.Errorf("File %s errors: %v", configFile, err) + return nil, fmt.Errorf("File %s errors: %v", configurationFile, err) } if string(config) != "" { @@ -160,7 +162,7 @@ func (f *feature) getArrayConfig() (map[string]*ArrayConnectionData, error) { } if len(jsonCreds) == 0 { - return nil, fmt.Errorf("no arrays are provided in configFile %s", configFile) + return nil, fmt.Errorf("no arrays are provided in configFile %s", configurationFile) } noOfDefaultArray := 0 @@ -219,7 +221,7 @@ func (f *feature) getArrayConfig() (map[string]*ArrayConnectionData, error) { arrays[c.SystemID] = ©OfCred } } else { - return nil, fmt.Errorf("arrays details are not provided in configFile %s", configFile) + return nil, fmt.Errorf("arrays details are not provided in configFile %s", configurationFile) } return arrays, nil } @@ -318,6 +320,8 @@ func (f *feature) aVxFlexOSService() error { f.errs = make([]error, 0) f.createVolumeRequest = nil f.publishVolumeRequest = nil + f.nodeGetInfoRequest = nil + f.nodeGetInfoResponse = nil f.listVolumesResponse = nil f.listSnapshotsResponse = nil f.capability = nil @@ -374,7 +378,7 @@ func (f *feature) aBasicNfsVolumeRequest(name string, size int64) error { if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -439,7 +443,7 @@ func (f *feature) aBasicNfsVolumeRequestWithSizeLessThan3Gi(name string, size in if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + 
f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -504,7 +508,7 @@ func (f *feature) aNfsVolumeRequestWithQuota(volname string, volsize int64, path if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -1263,7 +1267,7 @@ func (f *feature) iSetAnotherSystemName(systemType string) error { if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -1294,7 +1298,7 @@ func (f *feature) iSetAnotherSystemID(systemType string) error { if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -2100,7 +2104,7 @@ func (f *feature) aBasicNfsVolumeRequestWithWrongNasName(name string, size int64 if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -2162,7 +2166,7 @@ func (f *feature) aNfsCapabilityWithVoltypeAccessFstype(voltype, access, fstype if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -2227,7 +2231,7 @@ func (f *feature) aNfsVolumeRequest(name string, size int64) error { if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -2327,7 +2331,7 @@ func (f *feature) controllerPublishVolumeForNfsWithoutSDC(id string) error { if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -2357,7 +2361,7 @@ func (f *feature) controllerPublishVolumeForNfs(id string, nodeIDEnvVar string) if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -2609,7 +2613,7 @@ func (f *feature) ICallListFileSystemSnapshot() error { if f.arrays == nil { fmt.Printf("Initialize ArrayConfig from %s:\n", configFile) var err error - f.arrays, err = f.getArrayConfig() + f.arrays, err = f.getArrayConfig(configFile) if err != nil { return errors.New("Get multi array config failed " + err.Error()) } @@ -2721,7 +2725,7 @@ func (f *feature) checkNFS(_ context.Context, systemID string) (bool, error) { return false, err } if ver >= 4.0 { - arrayConData, err := f.getArrayConfig() + arrayConData, err := 
f.getArrayConfig(configFile) if err != nil { return false, err } @@ -2734,6 +2738,110 @@ func (f *feature) checkNFS(_ context.Context, systemID string) (bool, error) { return false, nil } +func (f *feature) createFakeNodeLabels(zoneLabelKey string) error { + + if zoneLabelKey == "" { + zoneLabelKey = os.Getenv("ZONE_LABEL_KEY") + } + nodeName := os.Getenv("X_CSI_POWERFLEX_KUBE_NODE_NAME") + + node := &apiv1.Node{ + ObjectMeta: v1.ObjectMeta{ + Name: nodeName, + Labels: map[string]string{ + zoneLabelKey: "zoneA", + "kubernetes.io/hostname": nodeName, + }, + }, + Status: apiv1.NodeStatus{ + Conditions: []apiv1.NodeCondition{ + { + Type: apiv1.NodeReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + } + _, err := service.K8sClientset.CoreV1().Nodes().Create(context.TODO(), node, v1.CreateOptions{}) + if err != nil { + fmt.Printf("CreateNode returned error: %s\n", err.Error()) + return err + } + return nil +} + +func (f *feature) iCallNodeGetInfo(zoneLabelKey string) error { + fmt.Println("[iCallNodeGetInfo] Calling NodeGetInfo...") + _, err := f.nodeGetInfo(f.nodeGetInfoRequest, zoneLabelKey) + if err != nil { + fmt.Printf("NodeGetInfo returned error: %s\n", err.Error()) + f.addError(err) + } + return nil +} + +func (f *feature) nodeGetInfo(req *csi.NodeGetInfoRequest, zoneLabelKey string) (*csi.NodeGetInfoResponse, error) { + fmt.Println("[nodeGetInfo] Calling NodeGetInfo...") + + ctx := context.Background() + client := csi.NewNodeClient(grpcClient) + var nodeResp *csi.NodeGetInfoResponse + + clientSet := fake.NewSimpleClientset() + service.K8sClientset = clientSet + + err := f.createFakeNodeLabels(zoneLabelKey) + if err != nil { + return nil, fmt.Errorf("failed to create fake node labels: %v", err) + } + + nodeResp, err = client.NodeGetInfo(ctx, req) + f.nodeGetInfoResponse = nodeResp + return nodeResp, err +} + +func (f *feature) aNodeGetInfoIsReturnedWithZoneTopology() error { + accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() + log.Printf("Node Accessibility %+v", accessibility) + if _, ok := accessibility.Segments[os.Getenv("ZONE_LABEL_KEY")]; !ok { + return fmt.Errorf("zone not found") + } + return nil +} + +func (f *feature) aNodeGetInfoIsReturnedWithoutZoneTopology(zoneLabelKey string) error { + accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() + log.Printf("Node Accessibility %+v", accessibility) + if _, ok := accessibility.Segments[zoneLabelKey]; ok { + return fmt.Errorf("zone found") + } + return nil +} + +func (f *feature) aNodeGetInfoIsReturnedWithSystemTopology() error { + accessibility := f.nodeGetInfoResponse.GetAccessibleTopology() + log.Printf("Node Accessibility %+v", accessibility) + + var err error + f.arrays, err = f.getArrayConfig(zoneConfigFile) + if err != nil { + return fmt.Errorf("failed to get array config: %v", err) + } + + labelAdded := false + for _, array := range f.arrays { + log.Printf("array systemID %+v", array.SystemID) + if _, ok := accessibility.Segments[service.Name+"/"+array.SystemID]; ok { + labelAdded = true + } + } + + if !labelAdded { + return fmt.Errorf("topology with zone label not found") + } + return nil +} + func (f *feature) iCreateZoneRequest(name string) error { req := f.createGenericZoneRequest(name) req.AccessibilityRequirements = new(csi.TopologyRequirement) @@ -2883,4 +2991,8 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call DeleteSnapshotForFS$`, f.iCallDeleteSnapshotForFS) s.Step(`^I create a zone volume request "([^"]*)"$`, f.iCreateZoneRequest) s.Step(`^I create an invalid zone volume 
request$`, f.iCreateInvalidZoneRequest) + s.Step(`^I call NodeGetInfo with "([^"]*)"$`, f.iCallNodeGetInfo) + s.Step(`^a NodeGetInfo is returned with zone topology$`, f.aNodeGetInfoIsReturnedWithZoneTopology) + s.Step(`^a NodeGetInfo is returned without zone topology "([^"]*)"$`, f.aNodeGetInfoIsReturnedWithoutZoneTopology) + s.Step(`^a NodeGetInfo is returned with system topology$`, f.aNodeGetInfoIsReturnedWithSystemTopology) } From 628b128c4a4e4d09bfd5bb846e804c2137d322d1 Mon Sep 17 00:00:00 2001 From: Harshita Pandey Date: Thu, 5 Dec 2024 22:45:12 +0530 Subject: [PATCH 08/15] fixing linting checks --- test/integration/step_defs_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/step_defs_test.go b/test/integration/step_defs_test.go index c5fa13c4..ddac6607 100644 --- a/test/integration/step_defs_test.go +++ b/test/integration/step_defs_test.go @@ -2739,7 +2739,6 @@ func (f *feature) checkNFS(_ context.Context, systemID string) (bool, error) { } func (f *feature) createFakeNodeLabels(zoneLabelKey string) error { - if zoneLabelKey == "" { zoneLabelKey = os.Getenv("ZONE_LABEL_KEY") } From c79bc173d18601ec8cf9df104ae6993bffaa703d Mon Sep 17 00:00:00 2001 From: Harshita Pandey Date: Fri, 6 Dec 2024 19:09:07 +0530 Subject: [PATCH 09/15] Renaming function arguments --- test/integration/integration_test.go | 10 +++++----- test/integration/step_defs_test.go | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 2a9d8e2b..b33b0cab 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -39,14 +39,14 @@ const ( var grpcClient *grpc.ClientConn -func readConfigFile(configurationFile string) { +func readConfigFile(filePath string) { /* load array config and give proper errors if not ok*/ - if _, err := os.Stat(configurationFile); err == nil { - if _, err := os.ReadFile(configurationFile); err != nil { - err = fmt.Errorf("DEBUG integration pre requisites missing %s with multi array configuration file ", configurationFile) + if _, err := os.Stat(filePath); err == nil { + if _, err := os.ReadFile(filePath); err != nil { + err = fmt.Errorf("DEBUG integration pre requisites missing %s with multi array configuration file ", filePath) panic(err) } - f, err := os.Open(configurationFile) + f, err := os.Open(filePath) r := bufio.NewReader(f) mdms := make([]string, 0) line, isPrefix, err := r.ReadLine() diff --git a/test/integration/step_defs_test.go b/test/integration/step_defs_test.go index ddac6607..99ba774d 100644 --- a/test/integration/step_defs_test.go +++ b/test/integration/step_defs_test.go @@ -139,19 +139,19 @@ func (f *feature) getGoscaleioClient() (client *goscaleio.Client, err error) { // there is no way to call service.go methods from here // hence copy same method over there , this is used to get all arrays and pick different // systemID to test with see method iSetAnotherSystemID -func (f *feature) getArrayConfig(configurationFile string) (map[string]*ArrayConnectionData, error) { +func (f *feature) getArrayConfig(filePath string) (map[string]*ArrayConnectionData, error) { arrays := make(map[string]*ArrayConnectionData) - _, err := os.Stat(configurationFile) + _, err := os.Stat(filePath) if err != nil { if os.IsNotExist(err) { - return nil, fmt.Errorf("File %s does not exist", configurationFile) + return nil, fmt.Errorf("File %s does not exist", filePath) } } - config, err := os.ReadFile(filepath.Clean(configurationFile)) + 
config, err := os.ReadFile(filepath.Clean(filePath)) if err != nil { - return nil, fmt.Errorf("File %s errors: %v", configurationFile, err) + return nil, fmt.Errorf("File %s errors: %v", filePath, err) } if string(config) != "" { @@ -162,7 +162,7 @@ func (f *feature) getArrayConfig(configurationFile string) (map[string]*ArrayCon } if len(jsonCreds) == 0 { - return nil, fmt.Errorf("no arrays are provided in configFile %s", configurationFile) + return nil, fmt.Errorf("no arrays are provided in configFile %s", filePath) } noOfDefaultArray := 0 @@ -221,7 +221,7 @@ func (f *feature) getArrayConfig(configurationFile string) (map[string]*ArrayCon arrays[c.SystemID] = ©OfCred } } else { - return nil, fmt.Errorf("arrays details are not provided in configFile %s", configurationFile) + return nil, fmt.Errorf("arrays details are not provided in configFile %s", filePath) } return arrays, nil } From 349559594e562a595b00da3d75c6526a0cb2b838 Mon Sep 17 00:00:00 2001 From: Fernando Alfaro Campos Date: Mon, 9 Dec 2024 15:19:29 -0500 Subject: [PATCH 10/15] Add Snapshot and Clone Support for Multi-Available Zone (#365) * Add snapshot check and topology add during zone volume creation * Add topology checks for clones * Add zone snapshot and restore e2e test * Address failed PR checks * Update README * Added e2e tests for clones * Add snap and clone test (#371) * Address PR comments --------- Co-authored-by: Trevor Dawe Co-authored-by: Bharath Sreekanth <93715158+bharathsreekanth@users.noreply.github.com> --- service/controller.go | 44 ++++- service/features/service.feature | 26 +++ service/step_defs_test.go | 40 +++- test/e2e/README.md | 1 + test/e2e/e2e.go | 206 ++++++++++++++++++++- test/e2e/features/e2e.feature | 29 +++ test/e2e/templates/zone-wait/clone.yaml | 41 ++++ test/e2e/templates/zone-wait/snapshot.yaml | 51 +++++ 8 files changed, 424 insertions(+), 14 deletions(-) create mode 100644 test/e2e/templates/zone-wait/clone.yaml create mode 100644 test/e2e/templates/zone-wait/snapshot.yaml diff --git a/service/controller.go b/service/controller.go index 42882535..83a66219 100644 --- a/service/controller.go +++ b/service/controller.go @@ -221,6 +221,23 @@ func (s *service) CreateVolume( // Handle Zone topology, which happens when node is annotated with a matching zone label if len(zoneTargetMap) != 0 && accessibility != nil && len(accessibility.GetPreferred()) > 0 { + contentSource := req.GetVolumeContentSource() + var sourceSystemID string + if contentSource != nil { + Log.Infof("[CreateVolume] Zone volume has a content source - we are a snapshot or clone: %+v", contentSource) + + snapshotSource := contentSource.GetSnapshot() + cloneSource := contentSource.GetVolume() + + if snapshotSource != nil { + sourceSystemID = s.getSystemIDFromCsiVolumeID(snapshotSource.SnapshotId) + Log.Infof("[CreateVolume] Zone snapshot source systemID: %s", sourceSystemID) + } else if cloneSource != nil { + sourceSystemID = s.getSystemIDFromCsiVolumeID(cloneSource.VolumeId) + Log.Infof("[CreateVolume] Zone clone source systemID: %s", sourceSystemID) + } + } + for _, topo := range accessibility.GetPreferred() { for topoLabel, zoneName := range topo.Segments { Log.Infof("Zoning based on label %s", s.opts.zoneLabelKey) @@ -231,12 +248,14 @@ func (s *service) CreateVolume( continue } + if sourceSystemID != "" && zoneTarget.systemID != sourceSystemID { + continue + } + protectionDomain = string(zoneTarget.protectionDomain) storagePool = string(zoneTarget.pool) systemID = zoneTarget.systemID - Log.Infof("Preferred topology zone %s, 
systemID %s, protectionDomain %s, and storagePool %s", zoneName, systemID, protectionDomain, storagePool) - if err := s.requireProbe(ctx, systemID); err != nil { Log.Errorln("Failed to probe system " + systemID) continue @@ -247,6 +266,8 @@ func (s *service) CreateVolume( Segments: systemSegments, }) + // We found a zone topology + Log.Infof("Preferred topology zone %s, systemID %s, protectionDomain %s, and storagePool %s", zoneName, systemID, protectionDomain, storagePool) zoneTopology = true } } @@ -552,13 +573,26 @@ func (s *service) CreateVolume( if contentSource != nil { volumeSource := contentSource.GetVolume() if volumeSource != nil { - Log.Printf("volume %s specified as volume content source", volumeSource.VolumeId) - return s.Clone(req, volumeSource, name, size, storagePool) + cloneResponse, err := s.Clone(req, volumeSource, name, size, storagePool) + if err != nil { + return nil, err + } + + cloneResponse.Volume.AccessibleTopology = volumeTopology + + return cloneResponse, nil } snapshotSource := contentSource.GetSnapshot() if snapshotSource != nil { Log.Printf("snapshot %s specified as volume content source", snapshotSource.SnapshotId) - return s.createVolumeFromSnapshot(req, snapshotSource, name, size, storagePool) + snapshotVolumeResponse, err := s.createVolumeFromSnapshot(req, snapshotSource, name, size, storagePool) + if err != nil { + return nil, err + } + + snapshotVolumeResponse.Volume.AccessibleTopology = volumeTopology + + return snapshotVolumeResponse, nil } } diff --git a/service/features/service.feature b/service/features/service.feature index d7c3545b..a8ca2429 100644 --- a/service/features/service.feature +++ b/service/features/service.feature @@ -1580,3 +1580,29 @@ Feature: VxFlex OS CSI interface | config | | "multi_az" | | "multi_az_custom_labels" | + + Scenario: Snapshot a single volume in zone + Given a VxFlexOS service + And I use config <config> + When I call Probe + And I call CreateVolume "volume1" with zones + And a valid CreateVolumeResponse is returned + And I call CreateSnapshot <name> + Then a valid CreateSnapshotResponse is returned + And I call Create Volume for zones from Snapshot <name> + Then a valid CreateVolumeResponse is returned + Examples: + | name | config | errorMsg | + | "snap1" | "multi_az" | "none" | + + Scenario: Clone a single volume in zone + Given a VxFlexOS service + And I use config <config> + When I call Probe + And I call CreateVolume <name> with zones + And a valid CreateVolumeResponse is returned + And I call Clone volume for zones <name> + Then a valid CreateVolumeResponse is returned + Examples: + | name | config | errorMsg | + | "volume1" | "multi_az" | "none" | diff --git a/service/step_defs_test.go b/service/step_defs_test.go index ced2869e..9cdefbbd 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -3815,6 +3815,39 @@ func (f *feature) iCallCreateVolumeFromSnapshot() error { return nil } +func (f *feature) iCallCreateVolumeForZonesFromSnapshot(snapshotID string) error { + ctx := new(context.Context) + req := getZoneEnabledRequest(f.service.opts.zoneLabelKey) + req.Name = "volumeForZonesFromSnap" + + source := &csi.VolumeContentSource_SnapshotSource{SnapshotId: snapshotID} + req.VolumeContentSource = new(csi.VolumeContentSource) + req.VolumeContentSource.Type = &csi.VolumeContentSource_Snapshot{Snapshot: source} + f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + if f.err != nil { + fmt.Printf("Error on CreateVolume for zones from snap: %s\n", f.err.Error()) + } + return nil +} + +func (f *feature)
iCallCloneVolumeForZones(volumeID string) error { + ctx := new(context.Context) + req := getZoneEnabledRequest(f.service.opts.zoneLabelKey) + req.Name = "clone" + + source := &csi.VolumeContentSource_VolumeSource{VolumeId: volumeID} + req.VolumeContentSource = new(csi.VolumeContentSource) + req.VolumeContentSource.Type = &csi.VolumeContentSource_Volume{Volume: source} + req.AccessibilityRequirements = new(csi.TopologyRequirement) + fmt.Printf("CallCloneVolumeForZones with request = %v\n", req) + f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + if f.err != nil { + fmt.Printf("Error on CreateVolume from volume: %s\n", f.err.Error()) + } + + return nil +} + +func (f *feature) iCallCreateVolumeFromSnapshotNFS() error { ctx := new(context.Context) req := getTypicalNFSCreateVolumeRequest() @@ -4688,6 +4721,7 @@ func (f *feature) iCallPingNASServer(systemID string, name string) error { func getZoneEnabledRequest(zoneLabelName string) *csi.CreateVolumeRequest { req := new(csi.CreateVolumeRequest) params := make(map[string]string) + params["storagepool"] = "viki_pool_HDD_20181031" req.Parameters = params capacityRange := new(csi.CapacityRange) capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 @@ -4718,11 +4752,11 @@ func (f *feature) iCallCreateVolumeWithZones(name string) error { req := f.createVolumeRequest req.Name = name - fmt.Println("I am in iCallCreateVolume fn.....") + fmt.Printf("I am in iCallCreateVolumeWithZones with req => %v\n", req) f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) if f.err != nil { - log.Printf("CreateVolume called failed: %s\n", f.err.Error()) + log.Printf("CreateVolume with zones call failed: %s\n", f.err.Error()) } if f.createVolumeResponse != nil { @@ -4905,6 +4939,8 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call DeleteSnapshot NFS$`, f.iCallDeleteSnapshotNFS) s.Step(`^a valid snapshot consistency group$`, f.aValidSnapshotConsistencyGroup) s.Step(`^I call Create Volume from Snapshot$`, f.iCallCreateVolumeFromSnapshot) + s.Step(`^I call Create Volume for zones from Snapshot "([^"]*)"$`, f.iCallCreateVolumeForZonesFromSnapshot) + s.Step(`^I call Clone volume for zones "([^"]*)"$`, f.iCallCloneVolumeForZones) s.Step(`^I call Create Volume from SnapshotNFS$`, f.iCallCreateVolumeFromSnapshotNFS) s.Step(`^the wrong capacity$`, f.theWrongCapacity) s.Step(`^the wrong storage pool$`, f.theWrongStoragePool) diff --git a/test/e2e/README.md b/test/e2e/README.md index 77416699..6225317f 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -22,3 +22,4 @@ The following tests are implemented and run during the `zone-e2e` test. 1. Creates a stateful set of 7 replicas and ensures that everything is up and ready with the zone configuration. 2. Cordons a node (marks it as unschedulable), creates 7 volumes/pods, and ensures that none gets scheduled on the cordoned node. +3. Creates a stateful set of 7 replicas, creates a snapshot and restore pod for each, and ensures that they are all running.
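+
+To run just this suite, the godog wiring in `e2e_test.go` can be driven through `go test` (an illustrative invocation; the timeout value is an assumption, adjust it for your cluster):
+
+```bash
+cd test/e2e
+go test -v -timeout 90m -run TestZoneVolumes
+```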
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 26611d17..d689a516 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -20,6 +20,7 @@ import ( "fmt" "log" "os/exec" + "strconv" "strings" "time" @@ -38,11 +39,12 @@ const ( ) type feature struct { - errs []error - zoneNodeMapping map[string]string - zoneKey string - supportedZones []string - cordonedNode string + errs []error + zoneNodeMapping map[string]string + zoneKey string + supportedZones []string + cordonedNode string + zoneReplicaCount int32 } func (f *feature) aVxFlexOSService() error { @@ -214,13 +216,13 @@ func (f *feature) deleteZoneVolumes(fileLocation string) error { } func (f *feature) checkStatfulSetStatus() error { - log.Println("[checkStatfulSetStatus] checking statefulset status") - err := f.isStatefulSetReady() if err != nil { return err } + log.Println("[checkStatfulSetStatus] Statefulset and zone pods are ready") + return nil } @@ -244,6 +246,7 @@ func (f *feature) isStatefulSetReady() error { // Everything should be ready. if *sts.Spec.Replicas == sts.Status.ReadyReplicas { ready = true + f.zoneReplicaCount = sts.Status.ReadyReplicas break } @@ -339,6 +342,190 @@ func (f *feature) checkPodsForCordonRun() error { return nil } +func (f *feature) createZoneSnapshotsAndRestore(location string) error { + log.Println("[createZoneSnapshotsAndRestore] Creating snapshots and restores") + templateFile := "templates/" + location + "/snapshot.yaml" + updatedTemplateFile := "templates/" + location + "/snapshot-updated.yaml" + + for i := 0; i < int(f.zoneReplicaCount); i++ { + time.Sleep(10 * time.Second) + + cpCmd := "cp " + templateFile + " " + updatedTemplateFile + b, err := execLocalCommand(cpCmd) + if err != nil { + return fmt.Errorf("failed to copy template file: %v\nErrMessage:\n%s", err, string(b)) + } + + // Update iteration and apply... + err = replaceInFile("ITERATION", strconv.Itoa(i), updatedTemplateFile) + if err != nil { + return err + } + + createSnapshot := "kubectl apply -f " + updatedTemplateFile + _, err = execLocalCommand(createSnapshot) + if err != nil { + return err + } + } + + log.Println("[createZoneSnapshotsAndRestore] Snapshots and restores created") + + return nil +} + +func (f *feature) createZoneClonesAndRestore(location string) error { + log.Println("[createZoneClonesAndRestore] Creating clones and restores") + templateFile := "templates/" + location + "/clone.yaml" + updatedTemplateFile := "templates/" + location + "/clone-updated.yaml" + + for i := 0; i < int(f.zoneReplicaCount); i++ { + time.Sleep(10 * time.Second) + + cpCmd := "cp " + templateFile + " " + updatedTemplateFile + b, err := execLocalCommand(cpCmd) + if err != nil { + return fmt.Errorf("failed to copy template file: %v\nErrMessage:\n%s", err, string(b)) + } + + // Update iteration and apply... 
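+ // replaceInFile swaps the literal ITERATION token in the copied template for the replica index, so each rendered clone PVC/pod gets a unique per-replica name.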
+ err = replaceInFile("ITERATION", strconv.Itoa(i), updatedTemplateFile) + if err != nil { + return err + } + + createClone := "kubectl apply -f " + updatedTemplateFile + _, err = execLocalCommand(createClone) + if err != nil { + return err + } + } + + log.Println("[createZoneClonesAndRestore] Clones and restores created") + + return nil +} + +func (f *feature) deleteZoneSnapshotsAndRestore(location string) error { + log.Println("[deleteZoneSnapshotsAndRestore] Deleting restores and snapshots") + templateFile := "templates/" + location + "/snapshot.yaml" + updatedTemplateFile := "templates/" + location + "/snapshot-updated.yaml" + + for i := 0; i < int(f.zoneReplicaCount); i++ { + time.Sleep(10 * time.Second) + + cpCmd := "cp " + templateFile + " " + updatedTemplateFile + b, err := execLocalCommand(cpCmd) + if err != nil { + return fmt.Errorf("failed to copy template file: %v\nErrMessage:\n%s", err, string(b)) + } + + // Update iteration and apply... + err = replaceInFile("ITERATION", strconv.Itoa(i), updatedTemplateFile) + if err != nil { + return err + } + + deleteSnapshot := "kubectl delete -f " + updatedTemplateFile + _, err = execLocalCommand(deleteSnapshot) + if err != nil { + return err + } + } + + log.Println("[deleteZoneSnapshotsAndRestore] Snapshots and restores deleted") + + return nil +} + +func (f *feature) deleteZoneClonesAndRestore(location string) error { + log.Println("[deleteZoneClonesAndRestore] Deleting restores and clones") + templateFile := "templates/" + location + "/clone.yaml" + updatedTemplateFile := "templates/" + location + "/clone-updated.yaml" + + for i := 0; i < int(f.zoneReplicaCount); i++ { + time.Sleep(10 * time.Second) + + cpCmd := "cp " + templateFile + " " + updatedTemplateFile + b, err := execLocalCommand(cpCmd) + if err != nil { + return fmt.Errorf("failed to copy template file: %v\nErrMessage:\n%s", err, string(b)) + } + + // Update iteration and apply... 
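+ // (The delete pass re-renders the same per-replica names, so kubectl delete targets exactly the objects the create pass applied.)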
+ err = replaceInFile("ITERATION", strconv.Itoa(i), updatedTemplateFile) + if err != nil { + return err + } + + deleteClone := "kubectl delete -f " + updatedTemplateFile + _, err = execLocalCommand(deleteClone) + if err != nil { + return err + } + } + + log.Println("[deleteZoneClonesAndRestore] Clones and restores deleted") + + return nil +} + +func (f *feature) areAllRestoresRunning() error { + log.Println("[areAllRestoresRunning] Checking if all restores are running") + + complete := false + attempts := 0 + for attempts < 15 { + getZonePods := "kubectl get pods -n " + testNamespace + " -o jsonpath='{.items}'" + result, err := execLocalCommand(getZonePods) + if err != nil { + return err + } + + pods := []v1Core.Pod{} + err = json.Unmarshal(result, &pods) + if err != nil { + return err + } + + runningCount := 0 + for _, pod := range pods { + if !strings.Contains(pod.ObjectMeta.Name, "maz-restore") { + continue + } + + if pod.Status.Phase == "Running" { + runningCount++ + } + } + + if runningCount != int(f.zoneReplicaCount) { + attempts++ + time.Sleep(10 * time.Second) + continue + } + + complete = true + break + + } + + if !complete { + return fmt.Errorf("all restores not running, check pods status containing maz-restore and then try again") + } + + return nil +} + +func replaceInFile(oldString, newString, templateFile string) error { + cmdString := "s|" + oldString + "|" + newString + "|g" + replaceCmd := fmt.Sprintf("sed -i '%s' %s", cmdString, templateFile) + _, err := execLocalCommand(replaceCmd) + if err != nil { + return fmt.Errorf("failed to substitute %s with %s in file %s: %s", oldString, newString, templateFile, err.Error()) + } + return nil +} + func execLocalCommand(command string) ([]byte, error) { var buf bytes.Buffer cmd := exec.Command("bash", "-c", command) @@ -364,4 +551,9 @@ func InitializeScenario(s *godog.ScenarioContext) { s.Step(`^check the statefulset for zones$`, f.checkStatfulSetStatus) s.Step(`^cordon one node$`, f.cordonNode) s.Step(`^ensure pods aren't scheduled incorrectly and still running$`, f.checkPodsForCordonRun) + s.Step(`^create snapshots for zone volumes and restore in "([^"]*)"$`, f.createZoneSnapshotsAndRestore) + s.Step(`^delete snapshots for zone volumes and restore in "([^"]*)"$`, f.deleteZoneSnapshotsAndRestore) + s.Step(`^all zone restores are running$`, f.areAllRestoresRunning) + s.Step(`^create clones for zone volumes and restore in "([^"]*)"$`, f.createZoneClonesAndRestore) + s.Step(`^delete clones for zone volumes and restore in "([^"]*)"$`, f.deleteZoneClonesAndRestore) +} diff --git a/test/e2e/features/e2e.feature b/test/e2e/features/e2e.feature index aa73d824..17bf5ffe 100644 --- a/test/e2e/features/e2e.feature +++ b/test/e2e/features/e2e.feature @@ -29,3 +29,32 @@ Feature: VxFlex OS CSI interface | secret | namespace | location | | "vxflexos-config" | "vxflexos" | "zone-wait" | + @zone + Scenario: Create zone volume and snapshots + Given a VxFlexOS service + And verify driver is configured and running correctly + And verify zone information from secret <secret> in namespace <namespace> + Then create zone volume and pod in <location> + And check the statefulset for zones + Then create snapshots for zone volumes and restore in <location> + And all zone restores are running + Then delete snapshots for zone volumes and restore in <location> + Then delete zone volume and pod in <location> + Examples: + | secret | namespace | location | + | "vxflexos-config" | "vxflexos" | "zone-wait" | + + @zone + Scenario: Create zone volume and clones + Given a VxFlexOS service + And verify driver is configured and running correctly + 
And verify zone information from secret <secret> in namespace <namespace> + Then create zone volume and pod in <location> + And check the statefulset for zones + Then create clones for zone volumes and restore in <location> + And all zone restores are running + Then delete clones for zone volumes and restore in <location> + Then delete zone volume and pod in <location> + Examples: + | secret | namespace | location | + | "vxflexos-config" | "vxflexos" | "zone-wait" | \ No newline at end of file diff --git a/test/e2e/templates/zone-wait/clone.yaml b/test/e2e/templates/zone-wait/clone.yaml new file mode 100644 index 00000000..4e1162d6 --- /dev/null +++ b/test/e2e/templates/zone-wait/clone.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: clone-maz-pvcITERATION + namespace: vxflexos-test +spec: + storageClassName: vxflexos-az-wait + dataSource: + name: multi-az-pvc-vxflextest-az-ITERATION + kind: PersistentVolumeClaim + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: clone-maz-restore-podITERATION + namespace: vxflexos-test +spec: + containers: + - name: busybox + image: quay.io/quay/busybox:latest + command: ["/bin/sleep", "3600"] + volumeMounts: + - mountPath: "/data0" + name: multi-az-pvc + resources: + requests: + cpu: "100m" + memory: "128Mi" + limits: + cpu: "200m" + memory: "256Mi" + volumes: + - name: multi-az-pvc + persistentVolumeClaim: + claimName: clone-maz-pvcITERATION diff --git a/test/e2e/templates/zone-wait/snapshot.yaml b/test/e2e/templates/zone-wait/snapshot.yaml new file mode 100644 index 00000000..6d325ef5 --- /dev/null +++ b/test/e2e/templates/zone-wait/snapshot.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + name: snapshot-maz-ITERATION + namespace: vxflexos-test +spec: + volumeSnapshotClassName: vxflexos-snapclass + source: + persistentVolumeClaimName: multi-az-pvc-vxflextest-az-ITERATION +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: snapshot-maz-pvcITERATION + namespace: vxflexos-test +spec: + dataSource: + name: snapshot-maz-ITERATION + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: snapshot-maz-restore-podITERATION + namespace: vxflexos-test +spec: + containers: + - name: busybox + image: quay.io/quay/busybox:latest + command: ["/bin/sleep", "3600"] + volumeMounts: + - mountPath: "/data0" + name: multi-az-pvc + resources: + requests: + cpu: "100m" + memory: "128Mi" + limits: + cpu: "200m" + memory: "256Mi" + volumes: + - name: multi-az-pvc + persistentVolumeClaim: + claimName: snapshot-maz-pvcITERATION From fc70ca93f8f7eebaddc3d4a51ada4ef89dce93d6 Mon Sep 17 00:00:00 2001 From: lukeatdell <115811384+lukeatdell@users.noreply.github.com> Date: Wed, 18 Dec 2024 15:16:35 -0600 Subject: [PATCH 11/15] Driver Node should only ping arrays within the zone on which the pod is scheduled (#378) * configure driver node service to only ping arrays in the same zone as the service. 
Co-authored-by: Fernando Alfaro Campos Co-authored-by: Trevor Dawe --- go.mod | 3 +- go.sum | 14 +- service/controller.go | 94 ++++++++-- service/controller_test.go | 266 ++++++++++++++++++++++++++++ service/features/service.feature | 42 +++-- service/identity.go | 10 +- service/node.go | 5 + service/service.go | 64 ++++++- service/service_test.go | 222 ++++++++++++++++++++++- service/step_defs_test.go | 292 ++++++++++++++++++------------- 10 files changed, 858 insertions(+), 154 deletions(-) create mode 100644 service/controller_test.go diff --git a/go.mod b/go.mod index bfc92582..a42cc231 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,6 @@ module github.com/dell/csi-vxflexos/v2 go 1.23 require ( - github.com/akutz/memconn v0.1.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/container-storage-interface/spec v1.6.0 github.com/cucumber/godog v0.15.0 @@ -16,7 +15,7 @@ require ( github.com/dell/dell-csi-extensions/volumeGroupSnapshot v1.7.0 github.com/dell/gocsi v1.12.0 github.com/dell/gofsutil v1.17.0 - github.com/dell/goscaleio v1.17.1 + github.com/dell/goscaleio v1.17.2-0.20241218182509-936b677c46d5 github.com/fsnotify/fsnotify v1.8.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 diff --git a/go.sum b/go.sum index ad8d59db..b7c71831 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,6 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0= github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84= -github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A= -github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -108,8 +106,16 @@ github.com/dell/gocsi v1.12.0 h1:Dn/8f2BLovo57T/aC5pP/4Eqz4h6WX8SbX+hxT5NlvQ= github.com/dell/gocsi v1.12.0/go.mod h1:hJURrmDrXDGW4xVtgi5Kx6zUsU3ht9l+nlreNx33rf0= github.com/dell/gofsutil v1.17.0 h1:QA6gUb1mz8kXNEN4eEx47OHCz8nSqZrrCnaDUYmV5EY= github.com/dell/gofsutil v1.17.0/go.mod h1:PN2hWl/pVLQiTsFR0X1x+GfhfOrfW8pGgH5xGcGMeFs= -github.com/dell/goscaleio v1.17.1 h1:0gwR1c55ij3xVu/ARDWQNxBKCRlxMmg61n+5gKBX3v8= -github.com/dell/goscaleio v1.17.1/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8= +github.com/dell/goscaleio v1.17.2-0.20241209165307-dcbadc33ab2e h1:Y+F8YP3ceH6XRz0phFV5VpS2Pmoi8f0Vtg261/C2pZo= +github.com/dell/goscaleio v1.17.2-0.20241209165307-dcbadc33ab2e/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8= +github.com/dell/goscaleio v1.17.2-0.20241213145027-141cfe292cfa h1:9honWWT9xEcI0OWyLtiWIDCaMAEpBAeyyzW+KPRVh10= +github.com/dell/goscaleio v1.17.2-0.20241213145027-141cfe292cfa/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8= +github.com/dell/goscaleio v1.17.2-0.20241213204026-19006b56eb26 
h1:Kg6MSwBmAlmUDWRKiG0YJRv1xd8qi9+mW7vAVNnghj4= +github.com/dell/goscaleio v1.17.2-0.20241213204026-19006b56eb26/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8= +github.com/dell/goscaleio v1.17.2-0.20241213215244-2164caaef4ab h1:DYWY7fs8v1VbmzF2pAx7peZtKhkppW5NCIyHCJN1fS4= +github.com/dell/goscaleio v1.17.2-0.20241213215244-2164caaef4ab/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8= +github.com/dell/goscaleio v1.17.2-0.20241218182509-936b677c46d5 h1:d7DwHvp7/hESR742f4iurtH3nHHSGPvnMadujZA2hsU= +github.com/dell/goscaleio v1.17.2-0.20241218182509-936b677c46d5/go.mod h1:2BsR92dYYnSmbZ34ixYdsucfyoQBDlbhbUUKnv6WalQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= diff --git a/service/controller.go b/service/controller.go index 83a66219..d87c7e02 100644 --- a/service/controller.go +++ b/service/controller.go @@ -2270,13 +2270,16 @@ func (s *service) getSystemCapacity(ctx context.Context, systemID, protectionDom adminClient := s.adminClients[systemID] system := s.systems[systemID] + if adminClient == nil || system == nil { + return 0, fmt.Errorf("can't find adminClient or system by id %s", systemID) + } var statsFunc func() (*siotypes.Statistics, error) // Default to get Capacity of system statsFunc = system.GetStatistics - if len(spName) > 0 { + if len(spName) > 0 && spName[0] != "" { // if storage pool is given, get capacity of storage pool pdID, err := s.getProtectionDomainIDFromName(systemID, protectionDomain) if err != nil { @@ -2369,6 +2372,15 @@ func (s *service) GetCapacity( } } + // If using availability zones, get capacity for the system in the zone + // using accessible topology parameter from k8s. + if s.opts.zoneLabelKey != "" { + systemID, err = s.getSystemIDFromZoneLabelKey(req) + if err != nil { + return nil, status.Errorf(codes.Internal, "%s", err.Error()) + } + } + if systemID == "" { // Get capacity of storage pool spname in all systems, return total capacity capacity, err = s.getCapacityForAllSystems(ctx, "", spname) @@ -2411,6 +2423,28 @@ func (s *service) GetCapacity( }, nil } +// getSystemIDFromZoneLabelKey returns the system ID associated with the zoneLabelKey if zoneLabelKey is set and +// contains an associated zone name. Returns an empty string otherwise. +func (s *service) getSystemIDFromZoneLabelKey(req *csi.GetCapacityRequest) (systemID string, err error) { + zoneName, ok := req.AccessibleTopology.Segments[s.opts.zoneLabelKey] + if !ok { + Log.Infof("could not get availability zone from accessible topology. 
Getting capacity for all systems") + return "", nil + } + + // find the systemID with the matching zone name + for _, array := range s.opts.arrays { + if zoneName == string(array.AvailabilityZone.Name) { + systemID = array.SystemID + break + } + } + if systemID == "" { + return "", fmt.Errorf("could not find an array assigned to zone '%s'", zoneName) + } + return systemID, nil +} + func (s *service) getMaximumVolumeSize(systemID string) (int64, error) { valueInCache, found := getCachedMaximumVolumeSize(systemID) if !found || valueInCache < 0 { @@ -2558,15 +2592,51 @@ func (s *service) ControllerGetCapabilities( }, nil } +func (s *service) getZoneFromZoneLabelKey(ctx context.Context, zoneLabelKey string) (zone string, err error) { + // get labels for this service, s + labels, err := GetNodeLabels(ctx, s) + if err != nil { + return "", err + } + + Log.Infof("Listing labels: %v", labels) + + // get the zone name from the labels + if val, ok := labels[zoneLabelKey]; ok { + return val, nil + } + + return "", fmt.Errorf("label %s not found", zoneLabelKey) +} + // systemProbeAll will iterate through all arrays in service.opts.arrays and probe them. If failed, it logs // the failed system name func (s *service) systemProbeAll(ctx context.Context) error { // probe all arrays - Log.Infof("Probing all arrays. Number of arrays: %d", len(s.opts.arrays)) + Log.Infoln("Probing all associated arrays") allArrayFail := true errMap := make(map[string]error) + zoneName := "" + usingZones := s.opts.zoneLabelKey != "" && s.isNodeMode() + + if usingZones { + var err error + zoneName, err = s.getZoneFromZoneLabelKey(ctx, s.opts.zoneLabelKey) + if err != nil { + return err + } + Log.Infof("probing zoneLabel '%s', zone value: '%s'", s.opts.zoneLabelKey, zoneName) + } for _, array := range s.opts.arrays { + // If zone information is available, use it to probe the array + if usingZones && !array.isInZone(zoneName) { + // Driver node containers should not probe arrays that exist outside their assigned zone + // Driver controller container should probe all arrays + Log.Infof("array %s zone %s does not match %s, not pinging this array\n", array.SystemID, array.AvailabilityZone.Name, zoneName) + continue + } + err := s.systemProbe(ctx, array) systemID := array.SystemID if err == nil { @@ -2587,23 +2657,23 @@ func (s *service) systemProbeAll(ctx context.Context) error { } // systemProbe will probe the given array -func (s *service) systemProbe(_ context.Context, array *ArrayConnectionData) error { +func (s *service) systemProbe(ctx context.Context, array *ArrayConnectionData) error { // Check that we have the details needed to login to the Gateway if array.Endpoint == "" { return status.Error(codes.FailedPrecondition, - "missing VxFlexOS Gateway endpoint") + "missing PowerFlex Gateway endpoint") } if array.Username == "" { return status.Error(codes.FailedPrecondition, - "missing VxFlexOS MDM user") + "missing PowerFlex MDM user") } if array.Password == "" { return status.Error(codes.FailedPrecondition, - "missing VxFlexOS MDM password") + "missing PowerFlex MDM password") } if array.SystemID == "" { return status.Error(codes.FailedPrecondition, - "missing VxFlexOS system name") + "missing PowerFlex system name") } var altSystemNames []string if array.AllSystemNames != "" { @@ -2626,25 +2696,27 @@ func (s *service) systemProbe(_ context.Context, array *ArrayConnectionData) err } } + Log.Printf("Login to PowerFlex Gateway, system=%s, endpoint=%s, user=%s\n", systemID, array.Endpoint, array.Username) + if 
s.adminClients[systemID].GetToken() == "" { - _, err := s.adminClients[systemID].Authenticate(&goscaleio.ConfigConnect{ + _, err := s.adminClients[systemID].WithContext(ctx).Authenticate(&goscaleio.ConfigConnect{ Endpoint: array.Endpoint, Username: array.Username, Password: array.Password, }) if err != nil { return status.Errorf(codes.FailedPrecondition, - "unable to login to VxFlexOS Gateway: %s", err.Error()) + "unable to login to PowerFlex Gateway: %s", err.Error()) } } // initialize system if needed if s.systems[systemID] == nil { - system, err := s.adminClients[systemID].FindSystem( + system, err := s.adminClients[systemID].WithContext(ctx).FindSystem( array.SystemID, array.SystemID, "") if err != nil { return status.Errorf(codes.FailedPrecondition, - "unable to find matching VxFlexOS system name: %s", + "unable to find matching PowerFlex system name: %s", err.Error()) } s.systems[systemID] = system diff --git a/service/controller_test.go b/service/controller_test.go new file mode 100644 index 00000000..3fa6f13d --- /dev/null +++ b/service/controller_test.go @@ -0,0 +1,266 @@ +// Copyright © 2024 Dell Inc. or its subsidiaries. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "errors" + "sync" + "testing" + + csi "github.com/container-storage-interface/spec/lib/go/csi" + sio "github.com/dell/goscaleio" + siotypes "github.com/dell/goscaleio/types/v1" + "golang.org/x/net/context" +) + +func Test_service_getZoneFromZoneLabelKey(t *testing.T) { + type fields struct { + opts Opts + adminClients map[string]*sio.Client + systems map[string]*sio.System + mode string + volCache []*siotypes.Volume + volCacheSystemID string + snapCache []*siotypes.Volume + snapCacheSystemID string + privDir string + storagePoolIDToName map[string]string + statisticsCounter int + volumePrefixToSystems map[string][]string + connectedSystemNameToID map[string]string + } + + type args struct { + ctx context.Context + zoneLabelKey string + } + + const validTopologyKey = "topology.kubernetes.io/zone" + const validZone = "zoneA" + + tests := map[string]struct { + fields fields + args args + wantZone string + wantErr bool + getNodeLabelFunc func(ctx context.Context, s *service) (map[string]string, error) + }{ + "success": { + // happy path test + args: args{ + ctx: context.Background(), + zoneLabelKey: validTopologyKey, + }, + wantZone: "zoneA", + wantErr: false, + getNodeLabelFunc: func(_ context.Context, _ *service) (map[string]string, error) { + nodeLabels := map[string]string{validTopologyKey: validZone} + return nodeLabels, nil + }, + }, + "use bad zone label key": { + // The key args.zoneLabelKey will not be found in the map returned by getNodeLabelFunc + args: args{ + ctx: context.Background(), + zoneLabelKey: "badkey", + }, + wantZone: "", + wantErr: true, + getNodeLabelFunc: func(_ context.Context, _ *service) (map[string]string, error) { + return nil, nil + }, + }, + "fail to get node labels": { + // getNodeLabelFunc will return an 
error, triggering failure to get the labels + args: args{ + ctx: context.Background(), + zoneLabelKey: "unimportant", + }, + wantZone: "", + wantErr: true, + getNodeLabelFunc: func(_ context.Context, _ *service) (map[string]string, error) { + return nil, errors.New("") + }, + }, + } + for testName, tt := range tests { + t.Run(testName, func(t *testing.T) { + s := &service{ + opts: tt.fields.opts, + adminClients: tt.fields.adminClients, + systems: tt.fields.systems, + mode: tt.fields.mode, + volCache: tt.fields.volCache, + volCacheRWL: sync.RWMutex{}, + volCacheSystemID: tt.fields.volCacheSystemID, + snapCache: tt.fields.snapCache, + snapCacheRWL: sync.RWMutex{}, + snapCacheSystemID: tt.fields.snapCacheSystemID, + privDir: tt.fields.privDir, + storagePoolIDToName: tt.fields.storagePoolIDToName, + statisticsCounter: tt.fields.statisticsCounter, + volumePrefixToSystems: tt.fields.volumePrefixToSystems, + connectedSystemNameToID: tt.fields.connectedSystemNameToID, + } + GetNodeLabels = tt.getNodeLabelFunc + gotZone, err := s.getZoneFromZoneLabelKey(tt.args.ctx, tt.args.zoneLabelKey) + if (err != nil) != tt.wantErr { + t.Errorf("service.getZoneFromZoneLabelKey() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotZone != tt.wantZone { + t.Errorf("service.getZoneFromZoneLabelKey() = %v, want %v", gotZone, tt.wantZone) + } + }) + } +} + +func Test_service_getSystemIDFromZoneLabelKey(t *testing.T) { + type fields struct { + opts Opts + adminClients map[string]*sio.Client + systems map[string]*sio.System + mode string + volCache []*siotypes.Volume + volCacheSystemID string + snapCache []*siotypes.Volume + snapCacheSystemID string + privDir string + storagePoolIDToName map[string]string + statisticsCounter int + volumePrefixToSystems map[string][]string + connectedSystemNameToID map[string]string + } + + type args struct { + req *csi.GetCapacityRequest + } + + const validSystemID = "valid-id" + const validTopologyKey = "topology.kubernetes.io/zone" + const validZone = "zoneA" + + tests := map[string]struct { + fields fields + args args + wantSystemID string + wantErr bool + }{ + "success": { + // happy path test + wantErr: false, + wantSystemID: validSystemID, + args: args{ + req: &csi.GetCapacityRequest{ + AccessibleTopology: &csi.Topology{ + Segments: map[string]string{ + validTopologyKey: validZone, + }, + }, + }, + }, + fields: fields{ + opts: Opts{ + zoneLabelKey: validTopologyKey, + arrays: map[string]*ArrayConnectionData{ + "array1": { + SystemID: validSystemID, + AvailabilityZone: &AvailabilityZone{ + Name: validZone, + }, + }, + }, + }, + }, + }, + "topology not passed with csi request": { + // should return an empty string if no topology info is passed + // with the csi request + wantErr: false, + wantSystemID: "", + args: args{ + req: &csi.GetCapacityRequest{ + AccessibleTopology: &csi.Topology{ + // don't pass any topology info with the request + Segments: map[string]string{}, + }, + }, + }, + fields: fields{ + opts: Opts{ + zoneLabelKey: validTopologyKey, + }, + }, + }, + "zone name missing in secret": { + // topology information in the csi request does not match + // any of the arrays in the secret + wantErr: true, + wantSystemID: "", + args: args{ + req: &csi.GetCapacityRequest{ + AccessibleTopology: &csi.Topology{ + Segments: map[string]string{ + validTopologyKey: validZone, + }, + }, + }, + }, + fields: fields{ + opts: Opts{ + zoneLabelKey: validTopologyKey, + arrays: map[string]*ArrayConnectionData{ + "array1": { + SystemID: validSystemID, + AvailabilityZone: 
&AvailabilityZone{
+							// ensure the zone name will not match the topology key value
+							// in the request
+							Name: validZone + "no-match",
+						},
+					},
+				},
+			},
+		},
+	}
+	for testName, tt := range tests {
+		t.Run(testName, func(t *testing.T) {
+			s := &service{
+				opts:                    tt.fields.opts,
+				adminClients:            tt.fields.adminClients,
+				systems:                 tt.fields.systems,
+				mode:                    tt.fields.mode,
+				volCache:                tt.fields.volCache,
+				volCacheRWL:             sync.RWMutex{},
+				volCacheSystemID:        tt.fields.volCacheSystemID,
+				snapCache:               tt.fields.snapCache,
+				snapCacheRWL:            sync.RWMutex{},
+				snapCacheSystemID:       tt.fields.snapCacheSystemID,
+				privDir:                 tt.fields.privDir,
+				storagePoolIDToName:     tt.fields.storagePoolIDToName,
+				statisticsCounter:       tt.fields.statisticsCounter,
+				volumePrefixToSystems:   tt.fields.volumePrefixToSystems,
+				connectedSystemNameToID: tt.fields.connectedSystemNameToID,
+			}
+			gotSystemID, err := s.getSystemIDFromZoneLabelKey(tt.args.req)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("service.getSystemIDFromZoneLabelKey() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if gotSystemID != tt.wantSystemID {
+				t.Errorf("service.getSystemIDFromZoneLabelKey() = %v, want %v", gotSystemID, tt.wantSystemID)
+			}
+		})
+	}
+}
diff --git a/service/features/service.feature b/service/features/service.feature
index a8ca2429..88a0bd4b 100644
--- a/service/features/service.feature
+++ b/service/features/service.feature
@@ -140,7 +140,7 @@ Feature: VxFlex OS CSI interface
     And the Controller has no connection
     When I invalidate the Probe cache
     And I call Probe
-    Then the error contains "unable to login to VxFlexOS Gateway"
+    Then the error contains "unable to login to PowerFlex Gateway"
 
   Scenario Outline: Probe Call with various errors
     Given a VxFlexOS service
@@ -151,11 +151,11 @@ Feature: VxFlex OS CSI interface
 
     Examples:
       | error | msg |
-      | "NoEndpointError" | "missing VxFlexOS Gateway endpoint" |
-      | "NoUserError" | "missing VxFlexOS MDM user" |
-      | "NoPasswordError" | "missing VxFlexOS MDM password" |
-      | "NoSysNameError" | "missing VxFlexOS system name" |
-      | "WrongSysNameError" | "unable to find matching VxFlexOS system name" |
+      | "NoEndpointError" | "missing PowerFlex Gateway endpoint" |
+      | "NoUserError" | "missing PowerFlex MDM user" |
+      | "NoPasswordError" | "missing PowerFlex MDM password" |
+      | "NoSysNameError" | "missing PowerFlex system name" |
+      | "WrongSysNameError" | "unable to find matching PowerFlex system name" |
 
 
   # This injected error fails on Windows with no SDC but passes on Linux with SDC
@@ -490,6 +490,18 @@ Feature: VxFlex OS CSI interface
     Given a VxFlexOS service
     When I call Probe
     And I call GetCapacity with storage pool ""
+
+  Scenario: Call GetCapacity for a system using Availability zones
+    Given a VxFlexOS service
+    And I use config <config>
+    When I call Probe
+    And I call GetCapacity with Availability Zone <zone-key> <zone-name>
+    Then the error contains <errorMsg>
+
+    Examples:
+      | config | zone-key | zone-name | errorMsg |
+      | "multi_az" | "zone.csi-vxflexos.dellemc.com" | "zoneA" | "none" |
+      | "multi_az" | "zone.csi-vxflexos.dellemc.com" | "badZone" | "could not find an array assigned to zone 'badZone'" |
 
   Scenario: Call GetCapacity with valid Storage Pool Name
     Given a VxFlexOS service
@@ -1145,7 +1157,7 @@ Feature: VxFlex OS CSI interface
   Scenario: Call getSystemName, should get error Unable to probe system with ID
     Given a VxFlexOS service
     When I call getSystemNameError
-    Then the error contains "missing VxFlexOS system name"
+    Then the error contains "missing PowerFlex system name"
 
   Scenario: Call getSystemName, should get Found system Name: mocksystem
     Given a VxFlexOS service
@@ -1178,14 +1190,14 @@ Feature: VxFlex OS CSI interface
     And I do not have a gateway connection
     And I do not have a valid gateway endpoint
     When I Call nodeGetAllSystems
-    Then the error contains "missing VxFlexOS Gateway endpoint"
+    Then the error contains "missing PowerFlex Gateway endpoint"
 
   Scenario: Call Node getAllSystems
     Given a VxFlexOS service
     And I do not have a gateway connection
     And I do not have a valid gateway password
     When I Call nodeGetAllSystems
-    Then the error contains "missing VxFlexOS MDM password"
+    Then the error contains "missing PowerFlex MDM password"
 
   Scenario: Call evalsymlinks
     Given a VxFlexOS service
@@ -1251,7 +1263,7 @@ Feature: VxFlex OS CSI interface
     And I invalidate the Probe cache
     When I call BeforeServe
     # Get different error message on Windows vs. Linux
-    Then the error contains "unable to login to VxFlexOS Gateway"
+    Then the error contains "unable to login to PowerFlex Gateway"
 
   Scenario: Test getArrayConfig with invalid config file
     Given an invalid config
@@ -1606,3 +1618,13 @@ Feature: VxFlex OS CSI interface
     Examples:
       | name | config | errorMsg |
       | "volume1" | "multi_az" | "none" |
+
+  Scenario: Probe all systems using availability zones
+    Given a VxFlexOS service
+    And I use config <config>
+    When I call systemProbeAll in mode <mode>
+    Then the error contains <errorMsg>
+    Examples:
+      | config | mode | errorMsg |
+      | "multi_az" | "node" | "none" |
+      | "multi_az" | "controller" | "none" |
\ No newline at end of file
diff --git a/service/identity.go b/service/identity.go
index b7ec6a2f..052a92d8 100644
--- a/service/identity.go
+++ b/service/identity.go
@@ -1,4 +1,4 @@
-// Copyright © 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+// Copyright © 2019-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -77,7 +77,6 @@ func (s *service) Probe(
 	_ *csi.ProbeRequest) (
 	*csi.ProbeResponse, error,
 ) {
-	Log.Debug("Probe called")
 	if !strings.EqualFold(s.mode, "node") {
 		Log.Debug("systemProbe")
 		if err := s.systemProbeAll(ctx); err != nil {
@@ -92,10 +91,9 @@ func (s *service) Probe(
 			return nil, err
 		}
 	}
-	ready := new(wrapperspb.BoolValue)
-	ready.Value = true
-	rep := new(csi.ProbeResponse)
-	rep.Ready = ready
+	rep := &csi.ProbeResponse{
+		Ready: wrapperspb.Bool(true),
+	}
 
 	Log.Debug(fmt.Sprintf("Probe returning: %v", rep.Ready.GetValue()))
 	return rep, nil
diff --git a/service/node.go b/service/node.go
index 31162df0..5415be59 100644
--- a/service/node.go
+++ b/service/node.go
@@ -766,6 +766,11 @@ func (s *service) NodeGetInfo(
 
 	if zone, ok := labels[s.opts.zoneLabelKey]; ok {
 		topology[s.opts.zoneLabelKey] = zone
+
+		err = s.SetPodZoneLabel(ctx, topology)
+		if err != nil {
+			Log.Warnf("Unable to set availability zone label '%s:%s' for this pod", s.opts.zoneLabelKey, zone)
+		}
 	}
 
 	for _, array := range s.opts.arrays {
diff --git a/service/service.go b/service/service.go
index ddd79830..ca294761 100644
--- a/service/service.go
+++ b/service/service.go
@@ -715,15 +715,17 @@ func (s *service) doProbe(ctx context.Context) error {
 	px.Lock()
 	defer px.Unlock()
 
-	if !strings.EqualFold(s.mode, "node") {
+	if !s.isNodeMode() {
+		Log.Info("[doProbe] controllerProbe")
 		if err := s.systemProbeAll(ctx); err != nil {
 			return err
 		}
 	}
 
 	// Do a node probe
-	if !strings.EqualFold(s.mode, "controller") {
+	if !s.isControllerMode() {
 		// Probe all systems managed by driver
+		Log.Info("[doProbe] nodeProbe")
 		if err := s.systemProbeAll(ctx); err != nil {
 			return err
 		}
@@ -1853,6 +1855,49 @@ func (s *service) GetNodeLabels(_ context.Context) (map[string]string, error) {
 	return node.Labels, nil
 }
 
+func (s *service) SetPodZoneLabel(ctx context.Context, zoneLabel map[string]string) error {
+	if K8sClientset == nil {
+		err := k8sutils.CreateKubeClientSet()
+		if err != nil {
+			return status.Error(codes.Internal, GetMessage("init client failed with error: %v", err))
+		}
+		K8sClientset = k8sutils.Clientset
+	}
+
+	// access the API to fetch the driver pods running in the driver namespace
+	pods, err := K8sClientset.CoreV1().Pods(DriverNamespace).List(ctx, v1.ListOptions{})
+	if err != nil {
+		return status.Error(codes.Internal, GetMessage("Unable to fetch the node labels. Error: %v", err))
+	}
+
+	podName := ""
+	for _, pod := range pods.Items {
+		if pod.Spec.NodeName == s.opts.KubeNodeName && pod.Labels["app"] != "" {
+			// only add labels to node pods. Controller pod is not restricted to a zone
+			if strings.Contains(pod.Name, "node") {
+				podName = pod.Name
+			}
+		}
+	}
+
+	pod, err := K8sClientset.CoreV1().Pods(DriverNamespace).Get(ctx, podName, v1.GetOptions{})
+	if err != nil {
+		return status.Error(codes.Internal, GetMessage("Unable to fetch the node labels. Error: %v", err))
+	}
+
+	for key, value := range zoneLabel {
+		Log.Printf("Setting Label: Key: %s, Value: %s for pod: %s\n", key, value, podName)
+		pod.Labels[key] = value
+	}
+
+	_, err = K8sClientset.CoreV1().Pods(DriverNamespace).Update(ctx, pod, v1.UpdateOptions{})
+	if err != nil {
+		return status.Error(codes.Internal, GetMessage("Unable to update the node labels. Error: %v", err))
Error: %v", err)) + } + + return nil +} + func (s *service) GetNodeUID(_ context.Context) (string, error) { if K8sClientset == nil { err := k8sutils.CreateKubeClientSet() @@ -1909,3 +1954,18 @@ func getZoneKeyLabelFromSecret(arrays map[string]*ArrayConnectionData) (string, return zoneKeyLabel, nil } + +// isControllerMode returns true if the mode property of service s is set to "node", false otherwise. +func (s *service) isNodeMode() bool { + return strings.EqualFold(s.mode, "node") +} + +// isControllerMode returns true if the mode property of service s is set to "controller", false otherwise. +func (s *service) isControllerMode() bool { + return strings.EqualFold(s.mode, "controller") +} + +// isInZone returns true if the array is configured for use in the provided zoneName, false otherwise. +func (array *ArrayConnectionData) isInZone(zoneName string) bool { + return array.AvailabilityZone != nil && array.AvailabilityZone.Name == ZoneName(zoneName) +} diff --git a/service/service_test.go b/service/service_test.go index ee3809cb..1430f86d 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -1,4 +1,4 @@ -// Copyright © 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. +// Copyright © 2019-2024 Dell Inc. or its subsidiaries. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,13 +14,20 @@ package service import ( + "context" "fmt" "net/http" "os" + "sync" "testing" "time" "github.com/cucumber/godog" + sio "github.com/dell/goscaleio" + siotypes "github.com/dell/goscaleio/types/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" ) func TestMain(m *testing.M) { @@ -56,3 +63,216 @@ func TestMain(m *testing.M) { os.Exit(status) } + +func Test_service_SetPodZoneLabel(t *testing.T) { + type fields struct { + opts Opts + adminClients map[string]*sio.Client + systems map[string]*sio.System + mode string + volCache []*siotypes.Volume + volCacheSystemID string + snapCache []*siotypes.Volume + snapCacheSystemID string + privDir string + storagePoolIDToName map[string]string + statisticsCounter int + volumePrefixToSystems map[string][]string + connectedSystemNameToID map[string]string + } + + type args struct { + ctx context.Context + zoneLabel map[string]string + } + + const validZoneName = "zoneA" + const validZoneLabelKey = "topology.kubernetes.io/zone" + const validAppName = "test-node-pod" + const validAppLabelKey = "app" + const validNodeName = "kube-node-name" + validAppLabels := map[string]string{validAppLabelKey: validAppName} + + tests := map[string]struct { + fields fields + args args + initTest func(s *service) + wantErr bool + }{ + "successfully add zone labels to a pod": { + // happy path test + wantErr: false, + args: args{ + ctx: context.Background(), + zoneLabel: map[string]string{ + validZoneLabelKey: validZoneName, + }, + }, + fields: fields{ + opts: Opts{ + KubeNodeName: validNodeName, + }, + }, + initTest: func(s *service) { + // setup fake k8s client and create a pod to perform tests against + K8sClientset = fake.NewSimpleClientset() + podClient := K8sClientset.CoreV1().Pods(DriverNamespace) + + // create test pod + _, err := podClient.Create(context.Background(), &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: validAppName, + Labels: validAppLabels, + }, + Spec: v1.PodSpec{ + NodeName: s.opts.KubeNodeName, + }, + }, metav1.CreateOptions{}) + if err != 
nil { + t.Errorf("error creating test pod error = %v", err) + } + }, + }, + "when 'list pods' k8s client request fails": { + // Attempt to set pod labels when the k8s client cannot get pods + wantErr: true, + args: args{ + ctx: context.Background(), + zoneLabel: map[string]string{ + validZoneLabelKey: validZoneName, + }, + }, + fields: fields{ + opts: Opts{ + KubeNodeName: validNodeName, + }, + }, + initTest: func(_ *service) { + // create a client, but do not create any pods so the request + // to list pods fails + K8sClientset = fake.NewSimpleClientset() + }, + }, + "clientset is nil and fails to create one": { + wantErr: true, + args: args{ + ctx: context.Background(), + zoneLabel: map[string]string{ + validZoneLabelKey: validZoneName, + }, + }, + fields: fields{ + opts: Opts{ + KubeNodeName: validNodeName, + }, + }, + initTest: func(_ *service) { + // setup clientset to nil to force creation + // Creation should fail because tests are not run in a cluster + K8sClientset = nil + }, + }, + } + + for testName, tt := range tests { + t.Run(testName, func(t *testing.T) { + s := &service{ + opts: tt.fields.opts, + adminClients: tt.fields.adminClients, + systems: tt.fields.systems, + mode: tt.fields.mode, + volCache: tt.fields.volCache, + volCacheRWL: sync.RWMutex{}, + volCacheSystemID: tt.fields.volCacheSystemID, + snapCache: tt.fields.snapCache, + snapCacheRWL: sync.RWMutex{}, + snapCacheSystemID: tt.fields.snapCacheSystemID, + privDir: tt.fields.privDir, + storagePoolIDToName: tt.fields.storagePoolIDToName, + statisticsCounter: tt.fields.statisticsCounter, + volumePrefixToSystems: tt.fields.volumePrefixToSystems, + connectedSystemNameToID: tt.fields.connectedSystemNameToID, + } + + tt.initTest(s) + err := s.SetPodZoneLabel(tt.args.ctx, tt.args.zoneLabel) + if (err != nil) != tt.wantErr { + t.Errorf("service.SetPodZoneLabel() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestArrayConnectionData_isInZone(t *testing.T) { + type fields struct { + SystemID string + Username string + Password string + Endpoint string + SkipCertificateValidation bool + Insecure bool + IsDefault bool + AllSystemNames string + NasName string + AvailabilityZone *AvailabilityZone + } + type args struct { + zoneName string + } + tests := map[string]struct { + fields fields + args args + want bool + }{ + "success": { + want: true, + fields: fields{ + AvailabilityZone: &AvailabilityZone{ + LabelKey: "topology.kubernetes.io/zone", + Name: "zoneA", + }, + }, + args: args{ + zoneName: "zoneA", + }, + }, + "availability zone is not used": { + want: false, + fields: fields{}, + args: args{ + zoneName: "zoneA", + }, + }, + "zone names do not match": { + want: false, + fields: fields{ + AvailabilityZone: &AvailabilityZone{ + LabelKey: "topology.kubernetes.io/zone", + Name: "zoneA", + }, + }, + args: args{ + zoneName: "zoneB", + }, + }, + } + for testName, tt := range tests { + t.Run(testName, func(t *testing.T) { + array := &ArrayConnectionData{ + SystemID: tt.fields.SystemID, + Username: tt.fields.Username, + Password: tt.fields.Password, + Endpoint: tt.fields.Endpoint, + SkipCertificateValidation: tt.fields.SkipCertificateValidation, + Insecure: tt.fields.Insecure, + IsDefault: tt.fields.IsDefault, + AllSystemNames: tt.fields.AllSystemNames, + NasName: tt.fields.NasName, + AvailabilityZone: tt.fields.AvailabilityZone, + } + if got := array.isInZone(tt.args.zoneName); got != tt.want { + t.Errorf("ArrayConnectionData.isInZone() = %v, want %v", got, tt.want) + } + }) + } +} diff --git 
a/service/step_defs_test.go b/service/step_defs_test.go index 9cdefbbd..3a7f138d 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -1,4 +1,4 @@ -// Copyright © 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved. +// Copyright © 2019-2024 Dell Inc. or its subsidiaries. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -326,9 +326,9 @@ func (f *feature) getService() *service { return f.service } var opts Opts - ctx := new(context.Context) + ctx := context.Background() var err error - opts.arrays, err = getArrayConfig(*ctx) + opts.arrays, err = getArrayConfig(ctx) if err != nil { log.Printf("Read arrays from config file failed: %s\n", err) } @@ -462,9 +462,9 @@ func (f *feature) aValidDynamicLogChange(file, expectedLevel string) error { // GetPluginInfo func (f *feature) iCallGetPluginInfo() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.GetPluginInfoRequest) - f.getPluginInfoResponse, f.err = f.service.GetPluginInfo(*ctx, req) + f.getPluginInfoResponse, f.err = f.service.GetPluginInfo(ctx, req) if f.err != nil { return f.err } @@ -513,9 +513,9 @@ func (f *feature) aValidGetPlugInfoResponseIsReturned() error { } func (f *feature) iCallGetPluginCapabilities() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.GetPluginCapabilitiesRequest) - f.getPluginCapabilitiesResponse, f.err = f.service.GetPluginCapabilities(*ctx, req) + f.getPluginCapabilitiesResponse, f.err = f.service.GetPluginCapabilities(ctx, req) if f.err != nil { return f.err } @@ -538,12 +538,12 @@ func (f *feature) aValidGetPluginCapabilitiesResponseIsReturned() error { } func (f *feature) iCallProbe() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.ProbeRequest) f.checkGoRoutines("before probe") f.service.opts.AutoProbe = true f.service.mode = "controller" - f.probeResponse, f.err = f.service.Probe(*ctx, req) + f.probeResponse, f.err = f.service.Probe(ctx, req) f.checkGoRoutines("after probe") return nil } @@ -700,7 +700,7 @@ func (f *feature) iSpecifyCreateVolumeMountRequest(fstype string) error { } func (f *feature) iCallCreateVolume(name string) error { - ctx := new(context.Context) + ctx := context.Background() if f.createVolumeRequest == nil { req := getTypicalCreateVolumeRequest() f.createVolumeRequest = req @@ -715,7 +715,7 @@ func (f *feature) iCallCreateVolume(name string) error { fmt.Println("I am in iCallCreateVolume fn.....") - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(ctx, req) if f.err != nil { log.Printf("CreateVolume called failed: %s\n", f.err.Error()) } @@ -727,7 +727,7 @@ func (f *feature) iCallCreateVolume(name string) error { } func (f *feature) iCallValidateVolumeHostConnectivity() error { - ctx := new(context.Context) + ctx := context.Background() sdcID := f.service.opts.SdcGUID sdcGUID := strings.ToUpper(sdcID) @@ -764,7 +764,7 @@ func (f *feature) iCallValidateVolumeHostConnectivity() error { VolumeIds: volIDs, } - connect, err := f.service.ValidateVolumeHostConnectivity(*ctx, req) + connect, err := f.service.ValidateVolumeHostConnectivity(ctx, req) if err != nil { f.err = errors.New(err.Error()) return nil @@ -1026,7 +1026,7 @@ func (f *feature) iSpecifyNoStoragePool() error { } func (f *feature) iCallCreateVolumeSize(name string, size int64) error { - ctx := new(context.Context) 
+ ctx := context.Background() var req *csi.CreateVolumeRequest if f.createVolumeRequest == nil { req = getTypicalCreateVolumeRequest() @@ -1040,7 +1040,7 @@ func (f *feature) iCallCreateVolumeSize(name string, size int64) error { req.Name = name f.createVolumeRequest = req - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(ctx, req) if f.err != nil { log.Printf("CreateVolumeSize called failed: %s\n", f.err.Error()) } @@ -1052,7 +1052,7 @@ func (f *feature) iCallCreateVolumeSize(name string, size int64) error { } func (f *feature) iCallCreateVolumeSizeNFS(name string, size int64) error { - ctx := new(context.Context) + ctx := context.Background() var req *csi.CreateVolumeRequest if f.createVolumeRequest == nil { req = getTypicalNFSCreateVolumeRequest() @@ -1066,7 +1066,7 @@ func (f *feature) iCallCreateVolumeSizeNFS(name string, size int64) error { req.Name = name f.createVolumeRequest = req - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(ctx, req) if f.err != nil { log.Printf("CreateVolumeSize called failed: %s\n", f.err.Error()) } @@ -1610,7 +1610,7 @@ func (f *feature) getControllerDeleteVolumeRequestNFS(accessType string) *csi.De } func (f *feature) iCallPublishVolumeWith(arg1 string) error { - ctx := new(context.Context) + ctx := context.Background() req := f.publishVolumeRequest if f.publishVolumeRequest == nil { req = f.getControllerPublishVolumeRequest(arg1) @@ -1618,7 +1618,7 @@ func (f *feature) iCallPublishVolumeWith(arg1 string) error { } log.Printf("Calling controllerPublishVolume") - f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(*ctx, req) + f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(ctx, req) if f.err != nil { log.Printf("PublishVolume call failed: %s\n", f.err.Error()) } @@ -1626,7 +1626,7 @@ func (f *feature) iCallPublishVolumeWith(arg1 string) error { } func (f *feature) iCallPublishVolumeWithNFS(arg1 string) error { - ctx := new(context.Context) + ctx := context.Background() req := f.publishVolumeRequest if f.publishVolumeRequest == nil { req = f.getControllerPublishVolumeRequestNFS(arg1) @@ -1683,7 +1683,7 @@ func (f *feature) iCallPublishVolumeWithNFS(arg1 string) error { } log.Printf("Calling controllerPublishVolume") - f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(*ctx, req) + f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(ctx, req) if f.err != nil { log.Printf("PublishVolume call failed: %s\n", f.err.Error()) } @@ -1806,14 +1806,14 @@ func (f *feature) getControllerUnpublishVolumeRequestNFS() *csi.ControllerUnpubl } func (f *feature) iCallUnpublishVolume() error { - ctx := new(context.Context) + ctx := context.Background() req := f.unpublishVolumeRequest if f.unpublishVolumeRequest == nil { req = f.getControllerUnpublishVolumeRequest() f.unpublishVolumeRequest = req } log.Printf("Calling controllerUnpublishVolume: %s", req.VolumeId) - f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(*ctx, req) + f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(ctx, req) if f.err != nil { log.Printf("UnpublishVolume call failed: %s\n", f.err.Error()) } @@ -1821,7 +1821,7 @@ func (f *feature) iCallUnpublishVolume() error { } func (f *feature) iCallUnpublishVolumeNFS() error { - ctx := new(context.Context) + ctx := context.Background() req := f.unpublishVolumeRequest if f.unpublishVolumeRequest == nil { req = 
f.getControllerUnpublishVolumeRequestNFS() @@ -1855,7 +1855,7 @@ func (f *feature) iCallUnpublishVolumeNFS() error { } log.Printf("Calling controllerUnpublishVolume: %s", req.VolumeId) - f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(*ctx, req) + f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(ctx, req) if f.err != nil { log.Printf("UnpublishVolume call failed: %s\n", f.err.Error()) } @@ -1877,50 +1877,50 @@ func (f *feature) theNumberOfSDCMappingsIs(arg1 int) error { } func (f *feature) iCallNodeGetInfo() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.NodeGetInfoRequest) f.service.opts.SdcGUID = "9E56672F-2F4B-4A42-BFF4-88B6846FBFDA" GetNodeLabels = mockGetNodeLabels GetNodeUID = mockGetNodeUID - f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) + f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(ctx, req) return nil } func (f *feature) iCallNodeGetInfoWithValidVolumeLimitNodeLabels() error { f.setFakeNode() - ctx := new(context.Context) + ctx := context.Background() req := new(csi.NodeGetInfoRequest) f.service.opts.SdcGUID = "9E56672F-2F4B-4A42-BFF4-88B6846FBFDA" GetNodeLabels = mockGetNodeLabelsWithVolumeLimits - f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) + f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(ctx, req) fmt.Printf("MaxVolumesPerNode: %v", f.nodeGetInfoResponse.MaxVolumesPerNode) return nil } func (f *feature) iCallNodeGetInfoWithInvalidVolumeLimitNodeLabels() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.NodeGetInfoRequest) f.service.opts.SdcGUID = "9E56672F-2F4B-4A42-BFF4-88B6846FBFDA" GetNodeLabels = mockGetNodeLabelsWithInvalidVolumeLimits - f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) + f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(ctx, req) return nil } func (f *feature) iCallNodeGetInfoWithValidNodeUID() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.NodeGetInfoRequest) GetNodeUID = mockGetNodeUID f.service.opts.SdcGUID = "" - f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) + f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(ctx, req) fmt.Printf("NodeGetInfoResponse: %v", f.nodeGetInfoResponse) return nil } func (f *feature) iCallGetNodeUID() error { f.setFakeNode() - ctx := new(context.Context) + ctx := context.Background() nodeUID := "" - nodeUID, err := f.service.GetNodeUID(*ctx) + nodeUID, err := f.service.GetNodeUID(ctx) fmt.Printf("Node UID: %v", nodeUID) if err != nil { @@ -2012,8 +2012,8 @@ func (f *feature) iCallGetNodeLabelsWithInvalidNode() error { func (f *feature) iCallGetNodeLabelsWithUnsetKubernetesClient() error { K8sClientset = nil - ctx := new(context.Context) - f.nodeLabels, f.err = f.service.GetNodeLabels(*ctx) + ctx := context.Background() + f.nodeLabels, f.err = f.service.GetNodeLabels(ctx) return nil } @@ -2025,17 +2025,17 @@ func (f *feature) iCallGetNodeUIDWithInvalidNode() error { func (f *feature) iCallGetNodeUIDWithUnsetKubernetesClient() error { K8sClientset = nil - ctx := new(context.Context) - f.nodeUID, f.err = f.service.GetNodeUID(*ctx) + ctx := context.Background() + f.nodeUID, f.err = f.service.GetNodeUID(ctx) return nil } func (f *feature) iCallNodeProbe() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.ProbeRequest) f.checkGoRoutines("before probe") f.service.mode = "node" - f.probeResponse, f.err = f.service.Probe(*ctx, req) + f.probeResponse, f.err = 
f.service.Probe(ctx, req) f.checkGoRoutines("after probe") return nil } @@ -2082,14 +2082,14 @@ func (f *feature) theVolumeLimitIsSet() error { } func (f *feature) iCallDeleteVolumeWith(arg1 string) error { - ctx := new(context.Context) + ctx := context.Background() req := f.deleteVolumeRequest if f.deleteVolumeRequest == nil { req = f.getControllerDeleteVolumeRequest(arg1) f.deleteVolumeRequest = req } log.Printf("Calling DeleteVolume") - f.deleteVolumeResponse, f.err = f.service.DeleteVolume(*ctx, req) + f.deleteVolumeResponse, f.err = f.service.DeleteVolume(ctx, req) if f.err != nil { log.Printf("DeleteVolume called failed: %s\n", f.err.Error()) } @@ -2097,14 +2097,14 @@ func (f *feature) iCallDeleteVolumeWith(arg1 string) error { } func (f *feature) iCallDeleteVolumeWithBad(arg1 string) error { - ctx := new(context.Context) + ctx := context.Background() req := f.deleteVolumeRequest if f.deleteVolumeRequest == nil { req = f.getControllerDeleteVolumeRequestBad(arg1) f.deleteVolumeRequest = req } log.Printf("Calling DeleteVolume") - f.deleteVolumeResponse, f.err = f.service.DeleteVolume(*ctx, req) + f.deleteVolumeResponse, f.err = f.service.DeleteVolume(ctx, req) if f.err != nil { log.Printf("DeleteVolume called failed: %s\n", f.err.Error()) } @@ -2112,14 +2112,14 @@ func (f *feature) iCallDeleteVolumeWithBad(arg1 string) error { } func (f *feature) iCallDeleteVolumeNFSWith(arg1 string) error { - ctx := new(context.Context) + ctx := context.Background() req := f.deleteVolumeRequest if f.deleteVolumeRequest == nil { req = f.getControllerDeleteVolumeRequestNFS(arg1) f.deleteVolumeRequest = req } log.Printf("Calling DeleteVolume") - f.deleteVolumeResponse, f.err = f.service.DeleteVolume(*ctx, req) + f.deleteVolumeResponse, f.err = f.service.DeleteVolume(ctx, req) if f.err != nil { log.Printf("DeleteVolume called failed: %s\n", f.err.Error()) } @@ -2147,7 +2147,7 @@ func (f *feature) theVolumeIsAlreadyMappedToAnSDC() error { } func (f *feature) iCallGetCapacityWithStoragePool(arg1 string) error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.GetCapacityRequest) if arg1 != "" { parameters := make(map[string]string) @@ -2155,7 +2155,31 @@ func (f *feature) iCallGetCapacityWithStoragePool(arg1 string) error { req.Parameters = parameters } log.Printf("Calling GetCapacity") - f.getCapacityResponse, f.err = f.service.GetCapacity(*ctx, req) + f.getCapacityResponse, f.err = f.service.GetCapacity(ctx, req) + if f.err != nil { + log.Printf("GetCapacity call failed: %s\n", f.err.Error()) + return nil + } + return nil +} + +func (f *feature) iCallGetCapacityWithAvailabilityZone(zoneLabelKey, zoneName string) error { + ctx := context.Background() + req := new(csi.GetCapacityRequest) + + // need to make sure parameters aren't empty. + // This is a parameter taken from a running driver. 
+ parameters := make(map[string]string) + parameters["csi.storage.k8s.io/fstype"] = "xfs" + req.Parameters = parameters + req.AccessibleTopology = &csi.Topology{ + Segments: map[string]string{ + zoneLabelKey: zoneName, + }, + } + + log.Printf("Calling GetCapacity") + f.getCapacityResponse, f.err = f.service.GetCapacity(ctx, req) if f.err != nil { log.Printf("GetCapacity call failed: %s\n", f.err.Error()) return nil @@ -2205,10 +2229,10 @@ func (f *feature) iCallControllerGetCapabilities(isHealthMonitorEnabled string) if isHealthMonitorEnabled == "true" { f.service.opts.IsHealthMonitorEnabled = true } - ctx := new(context.Context) + ctx := context.Background() req := new(csi.ControllerGetCapabilitiesRequest) log.Printf("Calling ControllerGetCapabilities") - f.controllerGetCapabilitiesResponse, f.err = f.service.ControllerGetCapabilities(*ctx, req) + f.controllerGetCapabilitiesResponse, f.err = f.service.ControllerGetCapabilities(ctx, req) if f.err != nil { log.Printf("ControllerGetCapabilities call failed: %s\n", f.err.Error()) return f.err @@ -2263,7 +2287,7 @@ func (f *feature) iCallListVolumesWith(maxEntriesString, startingToken string) e return err } - ctx := new(context.Context) + ctx := context.Background() req := f.listVolumesRequest if f.listVolumesRequest == nil { switch st := startingToken; st { @@ -2286,7 +2310,7 @@ func (f *feature) iCallListVolumesWith(maxEntriesString, startingToken string) e f.listVolumesRequest = req } log.Printf("Calling ListVolumes with req=%+v", f.listVolumesRequest) - f.listVolumesResponse, f.err = f.service.ListVolumes(*ctx, req) + f.listVolumesResponse, f.err = f.service.ListVolumes(ctx, req) if f.err != nil { log.Printf("ListVolume called failed: %s\n", f.err.Error()) } else { @@ -2349,7 +2373,7 @@ func (f *feature) aValidControllerGetCapabilitiesResponseIsReturned() error { } func (f *feature) iCallCloneVolume() error { - ctx := new(context.Context) + ctx := context.Background() req := getTypicalCreateVolumeRequest() req.Name = "clone" if f.invalidVolumeID { @@ -2366,7 +2390,7 @@ func (f *feature) iCallCloneVolume() error { req.VolumeContentSource = new(csi.VolumeContentSource) req.VolumeContentSource.Type = &csi.VolumeContentSource_Volume{Volume: source} req.AccessibilityRequirements = new(csi.TopologyRequirement) - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(ctx, req) if f.err != nil { fmt.Printf("Error on CreateVolume from volume: %s\n", f.err.Error()) } @@ -2376,7 +2400,7 @@ func (f *feature) iCallCloneVolume() error { //nolint:revive func (f *feature) iCallValidateVolumeCapabilitiesWithVoltypeAccessFstype(voltype, access, fstype string) error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.ValidateVolumeCapabilitiesRequest) if f.invalidVolumeID || f.createVolumeResponse == nil { req.VolumeId = badVolumeID2 @@ -2417,7 +2441,7 @@ func (f *feature) iCallValidateVolumeCapabilitiesWithVoltypeAccessFstype(voltype capabilities = append(capabilities, capability) req.VolumeCapabilities = capabilities log.Printf("Calling ValidateVolumeCapabilities %#v", accessMode) - f.validateVolumeCapabilitiesResponse, f.err = f.service.ValidateVolumeCapabilities(*ctx, req) + f.validateVolumeCapabilitiesResponse, f.err = f.service.ValidateVolumeCapabilities(ctx, req) if f.err != nil { return nil } @@ -2896,8 +2920,8 @@ func (f *feature) iCallNodePublishVolumeNFS(arg1 string) error { func (f *feature) iCallUnmountPrivMount() error { 
gofsutil.GOFSMock.InduceGetMountsError = true - ctx := new(context.Context) - err := unmountPrivMount(*ctx, nil, "/foo/bar") + ctx := context.Background() + err := unmountPrivMount(ctx, nil, "/foo/bar") fmt.Printf("unmountPrivMount getMounts error: %s\n", err.Error()) // getMounts induced error if err != nil { @@ -2917,7 +2941,7 @@ func (f *feature) iCallUnmountPrivMount() error { gofsutil.GOFSMock.InduceGetMountsError = false gofsutil.GOFSMock.InduceUnmountError = true - err = unmountPrivMount(*ctx, nil, target) + err = unmountPrivMount(ctx, nil, target) fmt.Printf("unmountPrivMount unmount error: %s\n", err) if err != nil { f.err = errors.New("error in unmountPrivMount") @@ -3271,9 +3295,9 @@ func (f *feature) iCallBeforeServe() error { } func (f *feature) iCallNodeStageVolume() error { - ctx := new(context.Context) + ctx := context.Background() req := new(csi.NodeStageVolumeRequest) - _, f.err = f.service.NodeStageVolume(*ctx, req) + _, f.err = f.service.NodeStageVolume(ctx, req) return nil } @@ -3322,7 +3346,7 @@ func (f *feature) iCallNodeExpandVolume(volPath string) error { } func (f *feature) iCallNodeGetVolumeStats() error { - ctx := new(context.Context) + ctx := context.Background() VolumeID := sdcVolume1 VolumePath := datadir @@ -3355,7 +3379,7 @@ func (f *feature) iCallNodeGetVolumeStats() error { req := &csi.NodeGetVolumeStatsRequest{VolumeId: VolumeID, VolumePath: VolumePath} - f.nodeGetVolumeStatsResponse, f.err = f.service.NodeGetVolumeStats(*ctx, req) + f.nodeGetVolumeStatsResponse, f.err = f.service.NodeGetVolumeStats(ctx, req) return nil } @@ -3461,12 +3485,12 @@ func (f *feature) iCallNodeUnstageVolumeWith(errStr string) error { } func (f *feature) iCallNodeGetCapabilities(isHealthMonitorEnabled string) error { - ctx := new(context.Context) + ctx := context.Background() if isHealthMonitorEnabled == "true" { f.service.opts.IsHealthMonitorEnabled = true } req := new(csi.NodeGetCapabilitiesRequest) - f.nodeGetCapabilitiesResponse, f.err = f.service.NodeGetCapabilities(*ctx, req) + f.nodeGetCapabilitiesResponse, f.err = f.service.NodeGetCapabilities(ctx, req) return nil } @@ -3657,7 +3681,7 @@ func (f *feature) aValidCreateVolumeSnapshotGroupResponse() error { } func (f *feature) iCallCreateSnapshot(snapName string) error { - ctx := new(context.Context) + ctx := context.Background() if len(f.volumeIDList) == 0 { f.volumeIDList = append(f.volumeIDList, "00000000") @@ -3692,15 +3716,15 @@ func (f *feature) iCallCreateSnapshot(snapName string) error { } fmt.Println("snapName is: ", snapName) - fmt.Println("ctx: ", *ctx) + fmt.Println("ctx: ", ctx) fmt.Println("req: ", req) - f.createSnapshotResponse, f.err = f.service.CreateSnapshot(*ctx, req) + f.createSnapshotResponse, f.err = f.service.CreateSnapshot(ctx, req) return nil } func (f *feature) iCallCreateSnapshotNFS(snapName string) error { - ctx := new(context.Context) + ctx := context.Background() req := &csi.CreateSnapshotRequest{ SourceVolumeId: "14dbbf5617523654" + "/" + fileSystemNameToID["volume1"], @@ -3716,10 +3740,10 @@ func (f *feature) iCallCreateSnapshotNFS(snapName string) error { } fmt.Println("snapName is: ", snapName) - fmt.Println("ctx: ", *ctx) + fmt.Println("ctx: ", ctx) fmt.Println("req: ", req) - f.createSnapshotResponse, f.err = f.service.CreateSnapshot(*ctx, req) + f.createSnapshotResponse, f.err = f.service.CreateSnapshot(ctx, req) return nil } @@ -3743,7 +3767,7 @@ func (f *feature) aValidSnapshot() error { } func (f *feature) iCallDeleteSnapshot() error { - ctx := new(context.Context) + ctx := 
context.Background() req := &csi.DeleteSnapshotRequest{SnapshotId: goodSnapID, Secrets: make(map[string]string)} req.Secrets["x"] = "y" if f.invalidVolumeID { @@ -3751,12 +3775,12 @@ func (f *feature) iCallDeleteSnapshot() error { } else if f.noVolumeID { req.SnapshotId = "" } - _, f.err = f.service.DeleteSnapshot(*ctx, req) + _, f.err = f.service.DeleteSnapshot(ctx, req) return nil } func (f *feature) iCallDeleteSnapshotNFS() error { - ctx := new(context.Context) + ctx := context.Background() var req *csi.DeleteSnapshotRequest = new(csi.DeleteSnapshotRequest) if fileSystemNameToID["snap1"] == "" { req = &csi.DeleteSnapshotRequest{SnapshotId: "14dbbf5617523654" + "/" + "1111111", Secrets: make(map[string]string)} @@ -3765,7 +3789,7 @@ func (f *feature) iCallDeleteSnapshotNFS() error { } req.Secrets["x"] = "y" - _, f.err = f.service.DeleteSnapshot(*ctx, req) + _, f.err = f.service.DeleteSnapshot(ctx, req) return nil } @@ -3796,7 +3820,7 @@ func (f *feature) aValidSnapshotConsistencyGroup() error { } func (f *feature) iCallCreateVolumeFromSnapshot() error { - ctx := new(context.Context) + ctx := context.Background() req := getTypicalCreateVolumeRequest() req.Name = "volumeFromSnap" if f.wrongCapacity { @@ -3808,7 +3832,7 @@ func (f *feature) iCallCreateVolumeFromSnapshot() error { source := &csi.VolumeContentSource_SnapshotSource{SnapshotId: goodSnapID} req.VolumeContentSource = new(csi.VolumeContentSource) req.VolumeContentSource.Type = &csi.VolumeContentSource_Snapshot{Snapshot: source} - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(ctx, req) if f.err != nil { fmt.Printf("Error on CreateVolume from snap: %s\n", f.err.Error()) } @@ -3849,7 +3873,7 @@ func (f *feature) iCallCloneVolumeForZones(volumeID string) error { } func (f *feature) iCallCreateVolumeFromSnapshotNFS() error { - ctx := new(context.Context) + ctx := context.Background() req := getTypicalNFSCreateVolumeRequest() req.Name = "volumeFromSnap" if f.wrongCapacity { @@ -3861,7 +3885,7 @@ func (f *feature) iCallCreateVolumeFromSnapshotNFS() error { source := &csi.VolumeContentSource_SnapshotSource{SnapshotId: "14dbbf5617523654" + "/" + fileSystemNameToID["snap1"]} req.VolumeContentSource = new(csi.VolumeContentSource) req.VolumeContentSource.Type = &csi.VolumeContentSource_Snapshot{Snapshot: source} - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(ctx, req) if f.err != nil { fmt.Printf("Error on CreateVolume from snap: %s\n", f.err.Error()) } @@ -3903,7 +3927,7 @@ func (f *feature) iCallListSnapshotsWithMaxentriesAndStartingtoken(maxEntriesStr if err != nil { return nil } - ctx := new(context.Context) + ctx := context.Background() // ignoring integer overflow issue, will not be an issue if maxEntries is less than 2147483647 // #nosec G115 @@ -3911,7 +3935,7 @@ func (f *feature) iCallListSnapshotsWithMaxentriesAndStartingtoken(maxEntriesStr f.listSnapshotsRequest = req log.Printf("Calling ListSnapshots with req=%+v", f.listVolumesRequest) - f.listSnapshotsResponse, f.err = f.service.ListSnapshots(*ctx, req) + f.listSnapshotsResponse, f.err = f.service.ListSnapshots(ctx, req) if f.err != nil { log.Printf("ListSnapshots called failed: %s\n", f.err.Error()) } @@ -3924,7 +3948,7 @@ func (f *feature) iCallListSnapshotsForVolume(arg1 string) error { sourceVolumeID = altVolumeID } - ctx := new(context.Context) + ctx := context.Background() req := &csi.ListSnapshotsRequest{SourceVolumeId: 
sourceVolumeID} req.StartingToken = "0" req.MaxEntries = 100 @@ -3936,7 +3960,7 @@ func (f *feature) iCallListSnapshotsForVolume(arg1 string) error { f.listSnapshotsRequest = req log.Printf("Calling ListSnapshots with req=%+v", f.listSnapshotsRequest) - f.listSnapshotsResponse, f.err = f.service.ListSnapshots(*ctx, req) + f.listSnapshotsResponse, f.err = f.service.ListSnapshots(ctx, req) if f.err != nil { log.Printf("ListSnapshots called failed: %s\n", f.err.Error()) } @@ -3944,11 +3968,11 @@ func (f *feature) iCallListSnapshotsForVolume(arg1 string) error { } func (f *feature) iCallListSnapshotsForSnapshot(arg1 string) error { - ctx := new(context.Context) + ctx := context.Background() req := &csi.ListSnapshotsRequest{SnapshotId: arg1} f.listSnapshotsRequest = req log.Printf("Calling ListSnapshots with req=%+v", f.listVolumesRequest) - f.listSnapshotsResponse, f.err = f.service.ListSnapshots(*ctx, req) + f.listSnapshotsResponse, f.err = f.service.ListSnapshots(ctx, req) if f.err != nil { log.Printf("ListSnapshots called failed: %s\n", f.err.Error()) } @@ -4189,8 +4213,8 @@ func (f *feature) iCallGetSystemNameError() error { stepHandlersErrors.PodmonNodeProbeError = true // Unable to probe system with ID: - ctx := new(context.Context) - f.err = f.service.systemProbe(*ctx, badarray) + ctx := context.Background() + f.err = f.service.systemProbe(ctx, badarray) return nil } @@ -4204,9 +4228,9 @@ func (f *feature) iCallGetSystemName() error { func (f *feature) iCallNodeGetAllSystems() error { // lookup the system names for a couple of systems // This should not generate an error as systems without names are supported - ctx := new(context.Context) + ctx := context.Background() badarray := f.service.opts.arrays[arrayID] - f.err = f.service.systemProbe(*ctx, badarray) + f.err = f.service.systemProbe(ctx, badarray) return nil } @@ -4249,8 +4273,8 @@ func (f *feature) anInvalidMaxVolumesPerNode() error { } func (f *feature) iCallGetArrayConfig() error { - ctx := new(context.Context) - _, err := getArrayConfig(*ctx) + ctx := context.Background() + _, err := getArrayConfig(ctx) if err != nil { f.err = err } @@ -4265,8 +4289,8 @@ func (f *feature) iCallgetArrayInstallationID(systemID string) error { } func (f *feature) iCallSetQoSParameters(systemID string, sdcID string, bandwidthLimit string, iopsLimit string, volumeName string, csiVolID string, nodeID string) error { - ctx := new(context.Context) - f.err = f.service.setQoSParameters(*ctx, systemID, sdcID, bandwidthLimit, iopsLimit, volumeName, csiVolID, nodeID) + ctx := context.Background() + f.err = f.service.setQoSParameters(ctx, systemID, sdcID, bandwidthLimit, iopsLimit, volumeName, csiVolID, nodeID) if f.err != nil { fmt.Printf("error in setting QoS parameters for volume %s : %s\n", volumeName, f.err.Error()) } @@ -4307,10 +4331,40 @@ func (f *feature) iUseConfig(filename string) error { return nil } +func (f *feature) iCallSystemProbeAll(mode string) error { + // set the mode of the service + if mode == "controller" || mode == "node" { + f.service.mode = mode + } else { + return fmt.Errorf("mode '%s' is not a valid service mode. 
must be 'controller' or 'node'", mode) + } + + // Create a fake node with necessary availability zone labels + f.service.opts.KubeNodeName = "node1" + fakeNode := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: f.service.opts.KubeNodeName, + UID: "1aa4c285-d41b-4911-bf3e-621253bfbade", + Labels: map[string]string{ + f.service.opts.zoneLabelKey: string(f.service.opts.arrays[arrayID].AvailabilityZone.Name), + }, + }, + } + thisNode, err := K8sClientset.CoreV1().Nodes().Create(context.Background(), fakeNode, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not create k8s node for test: err: %s", err.Error()) + } + // delete it when finished, otherwise it will fail to create nodes for subsequent tests + defer K8sClientset.CoreV1().Nodes().Delete(context.Background(), thisNode.Name, *&metav1.DeleteOptions{}) + + f.err = f.service.systemProbeAll(context.Background()) + return nil +} + func (f *feature) iCallGetReplicationCapabilities() error { req := &replication.GetReplicationCapabilityRequest{} - ctx := new(context.Context) - f.replicationCapabilitiesResponse, f.err = f.service.GetReplicationCapabilities(*ctx, req) + ctx := context.Background() + f.replicationCapabilitiesResponse, f.err = f.service.GetReplicationCapabilities(ctx, req) log.Printf("GetReplicationCapabilities returned %+v", f.replicationCapabilitiesResponse) return nil } @@ -4389,7 +4443,7 @@ func (f *feature) iSetApproveSdcEnabled(approveSDCEnabled string) error { } func (f *feature) iCallCreateRemoteVolume() error { - ctx := new(context.Context) + ctx := context.Background() req := &replication.CreateRemoteVolumeRequest{} if f.createVolumeResponse == nil { return errors.New("iCallCreateRemoteVolume: f.createVolumeResponse is nil") @@ -4405,7 +4459,7 @@ func (f *feature) iCallCreateRemoteVolume() error { f.service.WithRP(KeyReplicationRemoteStoragePool): "viki_pool_HDD_20181031", f.service.WithRP(KeyReplicationRemoteSystem): "15dbbf5617523655", } - _, f.err = f.service.CreateRemoteVolume(*ctx, req) + _, f.err = f.service.CreateRemoteVolume(ctx, req) if f.err != nil { fmt.Printf("CreateRemoteVolumeRequest returned error: %s", f.err) } @@ -4413,7 +4467,7 @@ func (f *feature) iCallCreateRemoteVolume() error { } func (f *feature) iCallDeleteLocalVolume(name string) error { - ctx := new(context.Context) + ctx := context.Background() replicatedVolName := "replicated-" + name volumeHandle := arrayID2 + "-" + volumeNameToID[replicatedVolName] @@ -4431,7 +4485,7 @@ func (f *feature) iCallDeleteLocalVolume(name string) error { VolumeHandle: volumeHandle, } - _, f.err = f.service.DeleteLocalVolume(*ctx, req) + _, f.err = f.service.DeleteLocalVolume(ctx, req) if f.err != nil { fmt.Printf("DeleteLocalVolume returned error: %s", f.err) } @@ -4440,7 +4494,7 @@ func (f *feature) iCallDeleteLocalVolume(name string) error { } func (f *feature) iCallCreateStorageProtectionGroup() error { - ctx := new(context.Context) + ctx := context.Background() parameters := make(map[string]string) // Must be repeatable. 
@@ -4389,7 +4443,7 @@ func (f *feature) iSetApproveSdcEnabled(approveSDCEnabled string) error {
 }
 
 func (f *feature) iCallCreateRemoteVolume() error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	req := &replication.CreateRemoteVolumeRequest{}
 	if f.createVolumeResponse == nil {
 		return errors.New("iCallCreateRemoteVolume: f.createVolumeResponse is nil")
@@ -4405,7 +4459,7 @@ func (f *feature) iCallCreateRemoteVolume() error {
 		f.service.WithRP(KeyReplicationRemoteStoragePool): "viki_pool_HDD_20181031",
 		f.service.WithRP(KeyReplicationRemoteSystem):      "15dbbf5617523655",
 	}
-	_, f.err = f.service.CreateRemoteVolume(*ctx, req)
+	_, f.err = f.service.CreateRemoteVolume(ctx, req)
 	if f.err != nil {
 		fmt.Printf("CreateRemoteVolumeRequest returned error: %s", f.err)
 	}
@@ -4413,7 +4467,7 @@ func (f *feature) iCallCreateRemoteVolume() error {
 }
 
 func (f *feature) iCallDeleteLocalVolume(name string) error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	replicatedVolName := "replicated-" + name
 	volumeHandle := arrayID2 + "-" + volumeNameToID[replicatedVolName]
@@ -4431,7 +4485,7 @@ func (f *feature) iCallDeleteLocalVolume(name string) error {
 		VolumeHandle: volumeHandle,
 	}
 
-	_, f.err = f.service.DeleteLocalVolume(*ctx, req)
+	_, f.err = f.service.DeleteLocalVolume(ctx, req)
 	if f.err != nil {
 		fmt.Printf("DeleteLocalVolume returned error: %s", f.err)
 	}
@@ -4440,7 +4494,7 @@ func (f *feature) iCallDeleteLocalVolume(name string) error {
 }
 
 func (f *feature) iCallCreateStorageProtectionGroup() error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	parameters := make(map[string]string)
 
 	// Must be repeatable.
@@ -4481,12 +4535,12 @@ func (f *feature) iCallCreateStorageProtectionGroup() error {
 	if stepHandlersErrors.BadVolIDError {
 		req.VolumeHandle = "0%0"
 	}
-	f.createStorageProtectionGroupResponse, f.err = f.service.CreateStorageProtectionGroup(*ctx, req)
+	f.createStorageProtectionGroupResponse, f.err = f.service.CreateStorageProtectionGroup(ctx, req)
 	return nil
 }
 
 func (f *feature) iCallCreateStorageProtectionGroupWith(arg1, arg2, arg3 string) error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	parameters := make(map[string]string)
 
 	// Must be repeatable.
@@ -4503,12 +4557,12 @@ func (f *feature) iCallCreateStorageProtectionGroupWith(arg1, arg2, arg3 string)
 		Parameters: parameters,
 	}
 
-	f.createStorageProtectionGroupResponse, f.err = f.service.CreateStorageProtectionGroup(*ctx, req)
+	f.createStorageProtectionGroupResponse, f.err = f.service.CreateStorageProtectionGroup(ctx, req)
 	return nil
 }
 
 func (f *feature) iCallGetStorageProtectionGroupStatus() error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	attributes := make(map[string]string)
 
 	replicationGroupConsistMode = defaultConsistencyMode
@@ -4518,13 +4572,13 @@ func (f *feature) iCallGetStorageProtectionGroupStatus() error {
 		ProtectionGroupId:         f.createStorageProtectionGroupResponse.LocalProtectionGroupId,
 		ProtectionGroupAttributes: attributes,
 	}
-	_, f.err = f.service.GetStorageProtectionGroupStatus(*ctx, req)
+	_, f.err = f.service.GetStorageProtectionGroupStatus(ctx, req)
 
 	return nil
 }
 
 func (f *feature) iCallGetStorageProtectionGroupStatusWithStateAndMode(arg1, arg2 string) error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	attributes := make(map[string]string)
 
 	replicationGroupState = arg1
@@ -4535,7 +4589,7 @@ func (f *feature) iCallGetStorageProtectionGroupStatusWithStateAndMode(arg1, arg
 		ProtectionGroupId:         f.createStorageProtectionGroupResponse.LocalProtectionGroupId,
 		ProtectionGroupAttributes: attributes,
 	}
-	_, f.err = f.service.GetStorageProtectionGroupStatus(*ctx, req)
+	_, f.err = f.service.GetStorageProtectionGroupStatus(ctx, req)
 
 	return nil
 }
@@ -4547,12 +4601,12 @@ func (f *feature) iCallDeleteVolume(name string) error {
 	for name, id := range volumeNameToID {
 		fmt.Printf("volNameToID name %s id %s\n", name, id)
 	}
-	ctx := new(context.Context)
+	ctx := context.Background()
 	req := f.getControllerDeleteVolumeRequest("single-writer")
 	id := arrayID + "-" + volumeNameToID[name]
 	log.Printf("iCallDeleteVolume name %s to ID %s", name, id)
 	req.VolumeId = id
-	f.deleteVolumeResponse, f.err = f.service.DeleteVolume(*ctx, req)
+	f.deleteVolumeResponse, f.err = f.service.DeleteVolume(ctx, req)
 	if f.err != nil {
 		fmt.Printf("DeleteVolume error: %s", f.err)
 	}
@@ -4560,19 +4614,19 @@ func (f *feature) iCallDeleteVolume(name string) error {
 }
 
 func (f *feature) iCallDeleteStorageProtectionGroup() error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	attributes := make(map[string]string)
 	attributes[f.service.opts.replicationContextPrefix+"systemName"] = arrayID
 	req := &replication.DeleteStorageProtectionGroupRequest{
 		ProtectionGroupId:         f.createStorageProtectionGroupResponse.LocalProtectionGroupId,
 		ProtectionGroupAttributes: attributes,
 	}
-	f.deleteStorageProtectionGroupResponse, f.err = f.service.DeleteStorageProtectionGroup(*ctx, req)
+	f.deleteStorageProtectionGroupResponse, f.err = f.service.DeleteStorageProtectionGroup(ctx, req)
 	return nil
 }
 
 func (f *feature) iCallExecuteAction(arg1 string) error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	attributes := make(map[string]string)
 	remoteAttributes := make(map[string]string)
@@ -4611,7 +4665,7 @@ func (f *feature) iCallExecuteAction(arg1 string) error {
 		ActionTypes: &action,
 	}
 
-	_, f.err = f.service.ExecuteAction(*ctx, req)
+	_, f.err = f.service.ExecuteAction(ctx, req)
 	return nil
 }
@@ -4744,7 +4798,7 @@ func getZoneEnabledRequest(zoneLabelName string) *csi.CreateVolumeRequest {
 }
 
 func (f *feature) iCallCreateVolumeWithZones(name string) error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	if f.createVolumeRequest == nil {
 		req := getZoneEnabledRequest(f.service.opts.zoneLabelKey)
 		f.createVolumeRequest = req
@@ -4754,7 +4808,7 @@ func (f *feature) iCallCreateVolumeWithZones(name string) error {
 
 	fmt.Printf("I am in iCallCreateVolume with zones fn with req => ..... %v ...", req)
 
-	f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req)
+	f.createVolumeResponse, f.err = f.service.CreateVolume(ctx, req)
 	if f.err != nil {
 		log.Printf("CreateVolume with zones called failed: %s\n", f.err.Error())
 	}
@@ -4771,12 +4825,12 @@ func mockGetNodeLabelsWithZone(_ context.Context, s *service) (map[string]string
 }
 
 func (f *feature) iCallNodeGetInfoWithZoneLabels() error {
-	ctx := new(context.Context)
+	ctx := context.Background()
 	req := new(csi.NodeGetInfoRequest)
 	f.service.opts.SdcGUID = "9E56672F-2F4B-4A42-BFF4-88B6846FBFDA"
 	GetNodeLabels = mockGetNodeLabelsWithZone
 	GetNodeUID = mockGetNodeUID
-	f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req)
+	f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(ctx, req)
 	return nil
 }
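
The two assignments to GetNodeLabels and GetNodeUID above work because the driver exposes them as package-level function variables, which tests can swap for deterministic fakes. A self-contained sketch of that injection pattern (names are illustrative, not the driver's):

    package main

    import "fmt"

    // getZone is a package-level function variable so tests can substitute it.
    var getZone = func(nodeName string) (string, error) {
        // production path would query the API server and read node labels
        return "", fmt.Errorf("not wired up in this sketch")
    }

    func describeNode(nodeName string) string {
        zone, err := getZone(nodeName)
        if err != nil {
            return nodeName + ": zone unknown"
        }
        return nodeName + ": " + zone
    }

    func main() {
        // A test overrides the variable with a deterministic fake.
        getZone = func(string) (string, error) { return "zoneA", nil }
        fmt.Println(describeNode("node1")) // node1: zoneA
    }
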
@@ -4890,6 +4944,7 @@ func FeatureContext(s *godog.ScenarioContext) {
 	s.Step(`^a valid DeleteVolumeResponse is returned$`, f.aValidDeleteVolumeResponseIsReturned)
 	s.Step(`^the volume is already mapped to an SDC$`, f.theVolumeIsAlreadyMappedToAnSDC)
 	s.Step(`^I call GetCapacity with storage pool "([^"]*)"$`, f.iCallGetCapacityWithStoragePool)
+	s.Step(`^I call GetCapacity with Availability Zone "([^"]*)" "([^"]*)"$`, f.iCallGetCapacityWithAvailabilityZone)
 	s.Step(`^a valid GetCapacityResponse is returned$`, f.aValidGetCapacityResponseIsReturned)
 	s.Step(`^a valid GetCapacityResponse1 is returned$`, f.aValidGetCapacityResponsewithmaxvolsizeIsReturned)
 	s.Step(`^I call get GetMaximumVolumeSize with systemid "([^"]*)"$`, f.iCallGetMaximumVolumeSize)
@@ -5041,6 +5096,7 @@ func FeatureContext(s *godog.ScenarioContext) {
 	s.Step(`^a valid NodeGetInfo is returned with node topology$`, f.aValidNodeGetInfoIsReturnedWithNodeTopology)
 	s.Step(`^a NodeGetInfo is returned without zone topology$`, f.aNodeGetInfoIsReturnedWithoutZoneTopology)
 	s.Step(`^a NodeGetInfo is returned without zone system topology$`, f.aNodeGetInfoIsReturnedWithoutZoneSystemTopology)
+	s.Step(`^I call systemProbeAll in mode "([^"]*)"$`, f.iCallSystemProbeAll)
 
 	s.After(func(ctx context.Context, _ *godog.Scenario, _ error) (context.Context, error) {
 		if f.server != nil {

From 160dbc421e3b5fa9fb46a209268a8faa2f789b97 Mon Sep 17 00:00:00 2001
From: Fernando Alfaro Campos
Date: Wed, 18 Dec 2024 16:39:34 -0500
Subject: [PATCH 12/15] Fix rebase issues

---
 go.sum                  |   8 ---
 service/preinit.go      |   9 ++-
 service/preinit_test.go | 126 ++++++++++++++++++++--------------------
 3 files changed, 69 insertions(+), 74 deletions(-)

diff --git a/go.sum b/go.sum
index b7c71831..7912bda8 100644
--- a/go.sum
+++ b/go.sum
@@ -106,14 +106,6 @@ github.com/dell/gocsi v1.12.0 h1:Dn/8f2BLovo57T/aC5pP/4Eqz4h6WX8SbX+hxT5NlvQ=
 github.com/dell/gocsi v1.12.0/go.mod h1:hJURrmDrXDGW4xVtgi5Kx6zUsU3ht9l+nlreNx33rf0=
 github.com/dell/gofsutil v1.17.0 h1:QA6gUb1mz8kXNEN4eEx47OHCz8nSqZrrCnaDUYmV5EY=
 github.com/dell/gofsutil v1.17.0/go.mod h1:PN2hWl/pVLQiTsFR0X1x+GfhfOrfW8pGgH5xGcGMeFs=
-github.com/dell/goscaleio v1.17.2-0.20241209165307-dcbadc33ab2e h1:Y+F8YP3ceH6XRz0phFV5VpS2Pmoi8f0Vtg261/C2pZo=
-github.com/dell/goscaleio v1.17.2-0.20241209165307-dcbadc33ab2e/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8=
-github.com/dell/goscaleio v1.17.2-0.20241213145027-141cfe292cfa h1:9honWWT9xEcI0OWyLtiWIDCaMAEpBAeyyzW+KPRVh10=
-github.com/dell/goscaleio v1.17.2-0.20241213145027-141cfe292cfa/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8=
-github.com/dell/goscaleio v1.17.2-0.20241213204026-19006b56eb26 h1:Kg6MSwBmAlmUDWRKiG0YJRv1xd8qi9+mW7vAVNnghj4=
-github.com/dell/goscaleio v1.17.2-0.20241213204026-19006b56eb26/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8=
-github.com/dell/goscaleio v1.17.2-0.20241213215244-2164caaef4ab h1:DYWY7fs8v1VbmzF2pAx7peZtKhkppW5NCIyHCJN1fS4=
-github.com/dell/goscaleio v1.17.2-0.20241213215244-2164caaef4ab/go.mod h1:7bX3rL8JWMmdifGr/UeD/Ju9wbkHUqvKDrbdu7XyGm8=
 github.com/dell/goscaleio v1.17.2-0.20241218182509-936b677c46d5 h1:d7DwHvp7/hESR742f4iurtH3nHHSGPvnMadujZA2hsU=
 github.com/dell/goscaleio v1.17.2-0.20241218182509-936b677c46d5/go.mod h1:2BsR92dYYnSmbZ34ixYdsucfyoQBDlbhbUUKnv6WalQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
diff --git a/service/preinit.go b/service/preinit.go
index a785e049..5066218e 100644
--- a/service/preinit.go
+++ b/service/preinit.go
@@ -138,7 +138,7 @@ func getMdmList(connectionData []*ArrayConnectionData, key, zone string) (string
 	sb := &strings.Builder{}
 	for _, connectionData := range connectionData {
-		if connectionData.Mdm != "" && connectionData.Zone.LabelKey == key && connectionData.Zone.Name == zone {
+		if connectionData.Mdm != "" && connectionData.AvailabilityZone != nil && connectionData.AvailabilityZone.LabelKey == key && string(connectionData.AvailabilityZone.Name) == zone {
 			if sb.Len() > 0 {
 				sb.WriteString("\\&")
 			}
@@ -158,10 +158,13 @@ func getLabelKey(connectionData []*ArrayConnectionData) (string, error) {
 		return "", fmt.Errorf("array connection data is empty")
 	}
 
-	labelKey := connectionData[0].Zone.LabelKey
+	labelKey := ""
+	if connectionData[0].AvailabilityZone != nil {
+		labelKey = connectionData[0].AvailabilityZone.LabelKey
+	}
 
 	for _, v := range connectionData {
-		if v.Zone.LabelKey != labelKey {
+		if v.AvailabilityZone != nil && v.AvailabilityZone.LabelKey != labelKey {
 			return "", fmt.Errorf("zone label key is not the same for all arrays")
 		}
 	}
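
The added nil-guards change preinit's semantics: an array without a zone block no longer makes the MDM-list builder dereference a nil pointer, it simply drops out of zone matching, and a missing zone on the first array yields an empty label key instead of a crash. A condensed, self-contained sketch that mirrors (but does not reuse) the patched getMdmList logic, including the \& separator written between per-array MDM groups:

    package main

    import (
        "fmt"
        "strings"
    )

    type availabilityZone struct {
        Name     string
        LabelKey string
    }

    type arrayConnectionData struct {
        Mdm              string
        AvailabilityZone *availabilityZone
    }

    // mdmListForZone joins the MDM IPs of every array bound to the given
    // label key and zone, separating per-array groups with a literal \&.
    func mdmListForZone(arrays []*arrayConnectionData, key, zone string) string {
        sb := &strings.Builder{}
        for _, a := range arrays {
            if a.Mdm == "" || a.AvailabilityZone == nil {
                continue // arrays without a zone block are skipped, not dereferenced
            }
            if a.AvailabilityZone.LabelKey != key || a.AvailabilityZone.Name != zone {
                continue
            }
            if sb.Len() > 0 {
                sb.WriteString(`\&`)
            }
            sb.WriteString(a.Mdm)
        }
        return sb.String()
    }

    func main() {
        arrays := []*arrayConnectionData{
            {Mdm: "192.168.0.10,192.168.0.20", AvailabilityZone: &availabilityZone{Name: "zoneA", LabelKey: "topology.kubernetes.io/zone"}},
            {Mdm: "192.168.1.10,192.168.1.20"}, // no zone block: ignored
        }
        fmt.Println(mdmListForZone(arrays, "topology.kubernetes.io/zone", "zoneA"))
        // Output: 192.168.0.10,192.168.0.20
    }
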
"192.168.1.1,192.168.1.2", - Zone: ZoneInfo{ + AvailabilityZone: &AvailabilityZone{ LabelKey: "key1", Name: "zone1", }, @@ -153,7 +153,7 @@ func TestPreInit(t *testing.T) { connectionInfo: []*ArrayConnectionData{ { Mdm: "192.168.1.1,192.168.1.2", - Zone: ZoneInfo{ + AvailabilityZone: &AvailabilityZone{ LabelKey: "key1", Name: "zone1", }, @@ -173,14 +173,14 @@ func TestPreInit(t *testing.T) { connectionInfo: []*ArrayConnectionData{ { Mdm: "192.168.1.1,192.168.1.2", - Zone: ZoneInfo{ + AvailabilityZone: &AvailabilityZone{ LabelKey: "key1", Name: "zone1", }, }, { Mdm: "192.168.2.1,192.168.2.2", - Zone: ZoneInfo{ + AvailabilityZone: &AvailabilityZone{ LabelKey: "key1", Name: "zone2", }, @@ -314,9 +314,9 @@ func TestGetMdmList(t *testing.T) { name: "single MDM", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID", - Mdm: "192.168.0.10", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey"}, + SystemID: "testSystemID", + Mdm: "192.168.0.10", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey"}, }, }, key: "testKey", @@ -328,9 +328,9 @@ func TestGetMdmList(t *testing.T) { name: "two MDMs", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID", - Mdm: "192.168.0.10,192.168.0.20", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey"}, + SystemID: "testSystemID", + Mdm: "192.168.0.10,192.168.0.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey"}, }, }, key: "testKey", @@ -342,14 +342,14 @@ func TestGetMdmList(t *testing.T) { name: "two arrays", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Mdm: "192.168.0.10,192.168.0.20", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey"}, + SystemID: "testSystemID1", + Mdm: "192.168.0.10,192.168.0.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey"}, }, { - SystemID: "testSystemID2", - Mdm: "192.168.1.10,192.168.1.20", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey"}, + SystemID: "testSystemID2", + Mdm: "192.168.1.10,192.168.1.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey"}, }, }, key: "testKey", @@ -361,14 +361,14 @@ func TestGetMdmList(t *testing.T) { name: "two arrays with one MDM each", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Mdm: "192.168.0.10", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey"}, + SystemID: "testSystemID1", + Mdm: "192.168.0.10", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey"}, }, { - SystemID: "testSystemID2", - Mdm: "192.168.1.10", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey"}, + SystemID: "testSystemID2", + Mdm: "192.168.1.10", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey"}, }, }, key: "testKey", @@ -380,14 +380,14 @@ func TestGetMdmList(t *testing.T) { name: "two arrays with multiple zones 1", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Mdm: "192.168.0.10,192.168.0.20", - Zone: ZoneInfo{Name: "testZone1", LabelKey: "testKey"}, + SystemID: "testSystemID1", + Mdm: "192.168.0.10,192.168.0.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone1", LabelKey: "testKey"}, }, { - SystemID: "testSystemID2", - Mdm: "192.168.1.10,192.168.1.20", - Zone: ZoneInfo{Name: "testZone2", LabelKey: "testKey"}, + SystemID: "testSystemID2", + Mdm: "192.168.1.10,192.168.1.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone2", LabelKey: "testKey"}, }, }, key: "testKey", @@ -399,14 +399,14 @@ func TestGetMdmList(t *testing.T) { 
name: "two arrays with multiple zones 2", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Mdm: "192.168.0.10,192.168.0.20", - Zone: ZoneInfo{Name: "testZone1", LabelKey: "testKey"}, + SystemID: "testSystemID1", + Mdm: "192.168.0.10,192.168.0.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone1", LabelKey: "testKey"}, }, { - SystemID: "testSystemID2", - Mdm: "192.168.1.10,192.168.1.20", - Zone: ZoneInfo{Name: "testZone2", LabelKey: "testKey"}, + SystemID: "testSystemID2", + Mdm: "192.168.1.10,192.168.1.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone2", LabelKey: "testKey"}, }, }, key: "testKey", @@ -418,14 +418,14 @@ func TestGetMdmList(t *testing.T) { name: "two arrays in same zone with different keys 1", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Mdm: "192.168.0.10,192.168.0.20", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey1"}, + SystemID: "testSystemID1", + Mdm: "192.168.0.10,192.168.0.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey1"}, }, { - SystemID: "testSystemID2", - Mdm: "192.168.1.10,192.168.1.20", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey2"}, + SystemID: "testSystemID2", + Mdm: "192.168.1.10,192.168.1.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey2"}, }, }, key: "testKey1", @@ -437,14 +437,14 @@ func TestGetMdmList(t *testing.T) { name: "two arrays in same zone with different keys 2", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Mdm: "192.168.0.10,192.168.0.20", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey1"}, + SystemID: "testSystemID1", + Mdm: "192.168.0.10,192.168.0.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey1"}, }, { - SystemID: "testSystemID2", - Mdm: "192.168.1.10,192.168.1.20", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey2"}, + SystemID: "testSystemID2", + Mdm: "192.168.1.10,192.168.1.20", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey2"}, }, }, key: "testKey2", @@ -498,12 +498,12 @@ func TestGetLabelKey(t *testing.T) { name: "zone is not empty with different keys", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey1"}, + SystemID: "testSystemID1", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey1"}, }, { - SystemID: "testSystemID2", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey2"}, + SystemID: "testSystemID2", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey2"}, }, }, errorExpected: true, @@ -516,8 +516,8 @@ func TestGetLabelKey(t *testing.T) { SystemID: "testSystemID1", }, { - SystemID: "testSystemID2", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey1"}, + SystemID: "testSystemID2", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey1"}, }, }, errorExpected: true, @@ -527,12 +527,12 @@ func TestGetLabelKey(t *testing.T) { name: "same key in all zones", connectionInfo: []*ArrayConnectionData{ { - SystemID: "testSystemID1", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey1"}, + SystemID: "testSystemID1", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey1"}, }, { - SystemID: "testSystemID2", - Zone: ZoneInfo{Name: "testZone", LabelKey: "testKey1"}, + SystemID: "testSystemID2", + AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey1"}, }, }, errorExpected: false, @@ -542,12 +542,12 @@ func 
@@ -542,12 +542,12 @@ func TestGetLabelKey(t *testing.T) {
 			name: "case sensitivity test for key",
 			connectionInfo: []*ArrayConnectionData{
 				{
-					SystemID: "testSystemID1",
-					Zone:     ZoneInfo{Name: "testZone", LabelKey: "testKey1"},
+					SystemID:         "testSystemID1",
+					AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "testKey1"},
 				},
 				{
-					SystemID: "testSystemID2",
-					Zone:     ZoneInfo{Name: "testZone", LabelKey: "TestKey1"},
+					SystemID:         "testSystemID2",
+					AvailabilityZone: &AvailabilityZone{Name: "testZone", LabelKey: "TestKey1"},
 				},
 			},
 			errorExpected: true,

From 23a138e1b7347f8db609d46a97f3da5242ff651a Mon Sep 17 00:00:00 2001
From: Fernando Alfaro Campos
Date: Thu, 19 Dec 2024 11:08:34 -0500
Subject: [PATCH 13/15] Address PR comments

---
 service/node.go | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/service/node.go b/service/node.go
index 5415be59..a678cbd8 100644
--- a/service/node.go
+++ b/service/node.go
@@ -773,6 +773,15 @@ func (s *service) NodeGetInfo(
 		}
 	}
 
+	nodeID, err := GetNodeUID(ctx, s)
+	if err != nil {
+		return nil, status.Error(codes.InvalidArgument, GetMessage("Could not fetch node UID"))
+	}
+
+	if s.opts.SdcGUID != "" {
+		nodeID = s.opts.SdcGUID
+	}
+
 	for _, array := range s.opts.arrays {
 		isNFS, err := s.checkNFS(ctx, array.SystemID)
 		if err != nil {
@@ -786,26 +795,15 @@ func (s *service) NodeGetInfo(
 		if zone, ok := topology[s.opts.zoneLabelKey]; ok {
 			if zone == string(array.AvailabilityZone.Name) {
 				// Add only the secret values with the correct zone.
-				nodeID, _ := GetNodeUID(ctx, s)
 				Log.Infof("Zone found for node ID: %s, adding system ID: %s to node topology", nodeID, array.SystemID)
 				topology[Name+"/"+array.SystemID] = SystemTopologySystemValue
 			}
 		} else {
-			nodeID, _ := GetNodeUID(ctx, s)
 			Log.Infof("No zoning found for node ID: %s, adding system ID: %s", nodeID, array.SystemID)
 			topology[Name+"/"+array.SystemID] = SystemTopologySystemValue
 		}
 	}
 
-	nodeID, err := GetNodeUID(ctx, s)
-	if err != nil {
-		return nil, status.Error(codes.InvalidArgument, GetMessage("Could not fetch node UID"))
-	}
-
-	if s.opts.SdcGUID != "" {
-		nodeID = s.opts.SdcGUID
-	}
-
 	Log.Debugf("NodeId: %v\n", nodeID)
 	return &csi.NodeGetInfoResponse{
 		NodeId: nodeID,
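
The net effect of the reordering in PATCH 13 is that the node UID is fetched and validated once, before the per-array loop, the SDC GUID still takes precedence when set, and each array is advertised in the topology only when the node either has no zone label or its zone matches the array's. A sketch of the resulting topology map (the driver-name and topology-value constants are assumptions; only the key shapes come from the diff):

    package main

    import "fmt"

    func main() {
        // Assumed stand-ins for the driver's Name and SystemTopologySystemValue
        // constants, whose exact values are not shown in this series.
        const driverName = "csi-vxflexos.dellemc.com"
        const systemValue = "csi-vxflexos.dellemc.com"
        zoneLabelKey := "topology.kubernetes.io/zone"

        // Zone label reported for this node (what GetNodeLabels returns).
        topology := map[string]string{zoneLabelKey: "zoneA"}

        // systemID -> configured zone name from the secret.
        arrays := map[string]string{
            "2b11bb111111bb1b": "zoneA", // same zone as the node: advertised
            "2b11bb111111bb1c": "zoneB", // different zone: skipped
        }

        for systemID, zone := range arrays {
            if nodeZone, zoned := topology[zoneLabelKey]; !zoned || nodeZone == zone {
                topology[driverName+"/"+systemID] = systemValue
            }
        }
        fmt.Println(topology[driverName+"/2b11bb111111bb1b"]) // advertised
    }
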
From 519fbc120b81746869537af0426271e5033064f7 Mon Sep 17 00:00:00 2001
From: Fernando Alfaro Campos
Date: Thu, 19 Dec 2024 13:38:22 -0500
Subject: [PATCH 14/15] Address PR comments

---
 samples/secret.yaml | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/samples/secret.yaml b/samples/secret.yaml
index 868e0d9a..fa3251c4 100644
--- a/samples/secret.yaml
+++ b/samples/secret.yaml
@@ -54,11 +54,11 @@
   # # pools: A list of pools that belong to a single protection defined in the PowerFlex system.
   # # Currently, csi-powerflex only supports one pool per protection domain.
   # - pools:
-  #     - "pool1"
+  #     -
   # # name: The name of the protection domain in the PowerFlex system.
   # # Optional: true
   # # name is required if storage pool names are not unique across protection domains.
-  # name: "domain1"
+  # name:
 # To add more PowerFlex systems, uncomment the following lines and provide the required values
 # - username: "admin"
 #   password: "password"
@@ -67,8 +67,9 @@
 #   skipCertificateValidation: true
 #   mdm: "10.0.0.3,10.0.0.4"
 #   zone:
-#     name: "ZoneB"
+#     name: "zoneB"
+#     labelKey: "topology.kubernetes.io/zone"
 #     protectionDomains:
-#       - name: "my-app-domain"
+#       - name:
 #         pools:
-#           - "my-backend-pool"
+#           -

From fd77e96fcda5cafbd5e1b1e4075e8d3d473dd536 Mon Sep 17 00:00:00 2001
From: Fernando Alfaro Campos
Date: Thu, 19 Dec 2024 15:07:11 -0500
Subject: [PATCH 15/15] Address PR comments

---
 samples/storageclass/storageclass-az.yaml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/samples/storageclass/storageclass-az.yaml b/samples/storageclass/storageclass-az.yaml
index c58a6974..dbad02cb 100644
--- a/samples/storageclass/storageclass-az.yaml
+++ b/samples/storageclass/storageclass-az.yaml
@@ -44,11 +44,10 @@ parameters:
 volumeBindingMode: WaitForFirstConsumer
 # allowedTopologies helps scheduling pods on worker nodes which match all of below expressions.
 # by providing the zone key, the scheduler can make sure that pods are scheduled on the same zone.
-# Note: The node must have the same label value with the key zone.csi-vxflexos.dellemc.com and a single
-# associated zone.
+# Note: The node must carry a matching label value for this key and a single associated zone.
 allowedTopologies:
 - matchLabelExpressions:
-  - key: zone.csi-vxflexos.dellemc.com
+  - key: topology.kubernetes.io/zone
     values:
       - zoneA
      - zoneB
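
Taken together, the secret sample from PATCH 14 and the structs from PATCH 01 round-trip cleanly through YAML: the zone block maps onto the AvailabilityZone pointer via its json tags. A sketch of loading such a secret, using sigs.k8s.io/yaml, which honors json struct tags; whether the driver loads its config exactly this way is an assumption, and the structs below are trimmed copies:

    package main

    import (
        "fmt"

        "sigs.k8s.io/yaml"
    )

    type availabilityZone struct {
        Name     string `json:"name"`
        LabelKey string `json:"labelKey"`
    }

    type arrayConnectionData struct {
        SystemID         string            `json:"systemID"`
        Endpoint         string            `json:"endpoint"`
        Mdm              string            `json:"mdm,omitempty"`
        AvailabilityZone *availabilityZone `json:"zone,omitempty"`
    }

    const secret = `
    - systemID: "2b11bb111111bb1b"
      endpoint: "https://127.0.0.2"
      mdm: "10.0.0.3,10.0.0.4"
      zone:
        name: "zoneB"
        labelKey: "topology.kubernetes.io/zone"
    `

    func main() {
        var arrays []arrayConnectionData
        if err := yaml.Unmarshal([]byte(secret), &arrays); err != nil {
            panic(err)
        }
        for _, a := range arrays {
            if a.AvailabilityZone == nil {
                fmt.Printf("%s: no zone (always advertised)\n", a.SystemID)
                continue
            }
            fmt.Printf("%s: zone %s via label %s\n", a.SystemID, a.AvailabilityZone.Name, a.AvailabilityZone.LabelKey)
        }
    }
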