Update e2e test scenarios
falfaroc committed Dec 2, 2024
1 parent c16f4df commit e23ff8f
Showing 2 changed files with 49 additions and 35 deletions.
70 changes: 43 additions & 27 deletions test/e2e/e2e.go
@@ -55,15 +55,15 @@ func (f *feature) aVxFlexOSService() error {
}

func (f *feature) isEverythingWorking() error {
fmt.Println("Checking if everything is working...")
log.Println("[isEverythingWorking] Checking if everything is working...")
checkNamespace := "kubectl get ns -A | grep -e " + driverNamespace + " -e " + testNamespace
result, err := execLocalCommand(checkNamespace)
if err != nil {
return err
}

if !strings.Contains(string(result), driverNamespace) || !strings.Contains(string(result), testNamespace) {
return fmt.Errorf("Namespace vxflexos or reptest not found")
return fmt.Errorf("namespace vxflexos or reptest not found")
}

checkDeployment := "kubectl get deployment -n " + driverNamespace + " vxflexos-controller -o json"
@@ -79,13 +79,32 @@ func (f *feature) isEverythingWorking() error {
}

if deploymentInfo.Status.Replicas != deploymentInfo.Status.ReadyReplicas {
return fmt.Errorf("Deployment not ready, check deployment status and then try again")
return fmt.Errorf("deployment not ready, check deployment status and then try again")
}

return nil
}

func (f *feature) getNodeAndSCInformation(zoneKey string) error {
func (f *feature) verifyZoneInfomation(secret, namespace string) error {
arrays, err := f.getZoneFromSecret(secret, namespace)
if err != nil {
return err
}

for _, array := range arrays {
if array.AvailabilityZone == nil {
continue
}

// Find the first zone label and assume all others are the same.
f.zoneKey = array.AvailabilityZone.LabelKey
break
}

if f.zoneKey == "" {
return fmt.Errorf("no labelKey found in secret %s", secret)
}

getNodeLabels := []string{"kubectl", "get", "nodes", "-A", "-o", "jsonpath='{.items}'"}
justString := strings.Join(getNodeLabels, " ")

@@ -105,17 +124,15 @@ func (f *feature) getNodeAndSCInformation(zoneKey string) error {
continue
}

if val, ok := node.ObjectMeta.Labels[zoneKey]; ok {
if val, ok := node.ObjectMeta.Labels[f.zoneKey]; ok {
f.zoneNodeMapping[node.ObjectMeta.Name] = val
}
}

if len(f.zoneNodeMapping) == 0 {
return fmt.Errorf("No nodes found for zone: %s", zoneKey)
return fmt.Errorf("no nodes found for zone: %s", f.zoneKey)
}

f.zoneKey = zoneKey

getStorageClassCmd := "kubectl get sc " + storageClass
_, err = execLocalCommand(getStorageClassCmd)
if err != nil {
@@ -137,8 +154,8 @@ func (f *feature) getNodeAndSCInformation(zoneKey string) error {
return fmt.Errorf("no topologies found for storage class %s not found", storageClass)
}

if scInfo.AllowedTopologies[0].MatchLabelExpressions[0].Key != zoneKey {
return fmt.Errorf("storage class %s does not have the proper zone lablel %s", storageClass, zoneKey)
if scInfo.AllowedTopologies[0].MatchLabelExpressions[0].Key != f.zoneKey {
return fmt.Errorf("storage class %s does not have the proper zone lablel %s", storageClass, f.zoneKey)
}

// Add supported zones from the test storage class.
@@ -147,27 +164,27 @@ func (f *feature) getNodeAndSCInformation(zoneKey string) error {
return nil
}

func (f *feature) verifyZonesInSecret(secretName, namespace string) error {
func (f *feature) getZoneFromSecret(secretName, namespace string) ([]service.ArrayConnectionData, error) {
getSecretInformation := []string{"kubectl", "get", "secrets", "-n", namespace, secretName, "-o", "jsonpath='{.data.config}'"}
justString := strings.Join(getSecretInformation, " ")

result, err := execLocalCommand(justString)
if err != nil {
return err
return nil, err
}

dec, err := base64.StdEncoding.DecodeString(string(result))
if err != nil {
return err
return nil, err
}

arrayConnection := make([]service.ArrayConnectionData, 0)
err = yaml.Unmarshal(dec, &arrayConnection)
if err != nil {
return err
return nil, err
}

return nil
return arrayConnection, nil
}

func (f *feature) createZoneVolumes(fileLocation string) error {
@@ -185,7 +202,7 @@ func (f *feature) createZoneVolumes(fileLocation string) error {

time.Sleep(10 * time.Second)

fmt.Printf("Created volumes and pods...\n")
log.Println("[createZoneVolumes] Created volumes and pods...")
return nil
}

@@ -204,7 +221,7 @@ func (f *feature) deleteZoneVolumes(fileLocation string) error {

time.Sleep(10 * time.Second)

fmt.Println("Deleted volumes and pods...")
log.Println("[deleteZoneVolumes] Deleted volumes and pods...")
return nil
}

Expand All @@ -213,7 +230,7 @@ func (f *feature) checkPodsStatus() error {

_, err := f.areAllPodsRunning()
if err != nil {
return fmt.Errorf("Pods not ready, check pods status and then try again")
return fmt.Errorf("pods not ready, check pods status and then try again")
}

return nil
@@ -251,10 +268,10 @@ func (f *feature) areAllPodsRunning() ([]v1Core.Pod, error) {
}

if !ready {
return nil, fmt.Errorf("Pods not ready, check pods status and then try again")
return nil, fmt.Errorf("pods not ready, check pods status and then try again")
}

fmt.Println("All pods are ready")
log.Println("[areAllPodsRunning] All pods are ready")

return podInfo, nil
}
@@ -265,7 +282,7 @@ func (f *feature) cordonNode() error {

// Get the first node in the zone
for key := range f.zoneNodeMapping {
fmt.Printf("Cordoning node: %s\n", key)
log.Printf("Cordoning node: %s\n", key)
nodeToCordon = key
break
}
@@ -291,7 +308,7 @@ func (f *feature) cordonNode() error {
return fmt.Errorf("node %s not cordoned", nodeToCordon)
}

fmt.Println("Cordoned node correctly")
log.Println("Cordoned node correctly")
f.cordonedNode = nodeToCordon

return nil
@@ -302,16 +319,16 @@ func (f *feature) checkPodsForCordonRun() error {

pods, err := f.areAllPodsRunning()
if err != nil {
return fmt.Errorf("Pods not ready, check pods status and then try again")
return fmt.Errorf("pods not ready, check pods status and then try again")
}

for _, pod := range pods {
if pod.Spec.NodeName == f.cordonedNode {
return fmt.Errorf("Pod %s scheduled incorrectly", pod.ObjectMeta.Name)
return fmt.Errorf("pod %s scheduled incorrectly", pod.ObjectMeta.Name)
}
}

fmt.Println("Pods scheduled correctly, reseting node...")
log.Println("[checkPodsForCordonRun] Pods scheduled correctly, reseting node...")

// Reset node since scheduled correctly
uncordonNodeCommand := "kubectl uncordon " + f.cordonedNode
@@ -342,8 +359,7 @@ func InitializeScenario(s *godog.ScenarioContext) {

s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService)
s.Step(`^verify driver is configured and running correctly$`, f.isEverythingWorking)
s.Step(`^verify that the node and storage class zone label is "([^"]*)"$`, f.getNodeAndSCInformation)
s.Step(`^verify that the secret "([^"]*)" in namespace "([^"]*)" is set for zoning$`, f.verifyZonesInSecret)
s.Step(`^verify zone information from secret "([^"]*)" in namespace "([^"]*)"$`, f.verifyZoneInfomation)
s.Step(`^create zone volume and pod in "([^"]*)"$`, f.createZoneVolumes)
s.Step(`^delete zone volume and pod in "([^"]*)"$`, f.deleteZoneVolumes)
s.Step(`^check pods to be running on desired zones$`, f.checkPodsStatus)
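
Note on the helper these steps rely on: every step above shells out through execLocalCommand, which is not part of this diff. A minimal sketch of how such a helper is typically written, assuming it takes the joined command string and returns the raw output bytes (the repository's actual implementation may differ):

// Assumed helper, not shown in this diff: run a command through a local shell
// so pipes such as "kubectl get ns -A | grep ..." work, and return its output.
package e2e

import (
	"fmt"
	"os/exec"
	"strings"
)

func execLocalCommand(command string) ([]byte, error) {
	out, err := exec.Command("bash", "-c", command).CombinedOutput()
	if err != nil {
		return out, fmt.Errorf("command %q failed: %w", command, err)
	}
	// jsonpath output wrapped in single quotes is trimmed here so callers can
	// pass the result straight to json.Unmarshal or base64 decoding (an
	// assumption, matching how the results are consumed above).
	return []byte(strings.Trim(string(out), "'\n")), nil
}
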
14 changes: 6 additions & 8 deletions test/e2e/features/e2e.feature
@@ -7,26 +7,24 @@ Feature: VxFlex OS CSI interface
Scenario: Create zone volume through k8s
Given a VxFlexOS service
And verify driver is configured and running correctly
And verify that the node and storage class zone label is <zoneLabelKey>
And verify that the secret <secret> in namespace <namespace> is set for zoning
And verify zone information from secret <secret> in namespace <namespace>
Then create zone volume and pod in <location>
And check pods to be running on desired zones
Then delete zone volume and pod in <location>
Examples:
| zoneLabelKey | secret | namespace | location |
| "zone.csi-vxflexos.dellemc.com" | "vxflexos-config" | "vxflexos" | "zone-wait" |
| secret | namespace | location |
| "vxflexos-config" | "vxflexos" | "zone-wait" |

@zone
Scenario: Cordon node and create zone volume through k8s
Given a VxFlexOS service
And verify driver is configured and running correctly
And verify that the node and storage class zone label is <zoneLabelKey>
And verify that the secret <secret> in namespace <namespace> is set for zoning
And verify zone information from secret <secret> in namespace <namespace>
Then cordon one node
Then create zone volume and pod in <location>
And ensure pods aren't scheduled incorrectly and still running
Then delete zone volume and pod in <location>
Examples:
| zoneLabelKey | secret | namespace | location |
| "zone.csi-vxflexos.dellemc.com" | "vxflexos-config" | "vxflexos" | "zone-wait" |
| secret | namespace | location |
| "vxflexos-config" | "vxflexos" | "zone-wait" |
