From a5e0f818fbef8370357efe042aeedf4413404056 Mon Sep 17 00:00:00 2001 From: Rohit-PX Date: Tue, 11 Apr 2023 21:59:57 +0000 Subject: [PATCH] changes for testrail auto-update Signed-off-by: Rohit-PX --- test/integration_test/common_test.go | 109 ++++++++++++++++++- test/integration_test/extender_test.go | 72 ++++++++++++ test/integration_test/health_monitor_test.go | 32 ++++++ test/integration_test/stork-test-pod.yaml | 14 +++ test/integration_test/test-deploy.sh | 10 ++ 5 files changed, 234 insertions(+), 3 deletions(-) diff --git a/test/integration_test/common_test.go b/test/integration_test/common_test.go index f2a27c9510..a066495fac 100644 --- a/test/integration_test/common_test.go +++ b/test/integration_test/common_test.go @@ -48,6 +48,8 @@ import ( _ "github.com/portworx/torpedo/drivers/volume/generic_csi" _ "github.com/portworx/torpedo/drivers/volume/linstor" _ "github.com/portworx/torpedo/drivers/volume/portworx" + "github.com/portworx/torpedo/pkg/log" + testrailutils "github.com/portworx/torpedo/pkg/testrailuttils" "github.com/sirupsen/logrus" "github.com/skyrings/skyring-common/tools/uuid" "github.com/stretchr/testify/require" @@ -117,6 +119,17 @@ const ( tokenKey = "token" clusterIP = "ip" clusterPort = "port" + + testResultPass = "Pass" + testResultFail = "Fail" + + testrailRunNameVar = "TESTRAIL_RUN_NAME" + testrailRunIDVar = "TESTRAIL_RUN_ID" + testrailJenkinsBuildURLVar = "TESTRAIL_JENKINS_BUILD_URL" + testrailHostVar = "TESTRAIL_HOST" + testrailUserNameVar = "TESTRAIL_USERNAME" + testrailPasswordVar = "TESTRAIL_PASSWORD" + testrailMilestoneVar = "TESTRAIL_MILESTONE" ) var nodeDriver node.Driver @@ -140,6 +153,11 @@ var storkVersionCheck bool var cloudDeletionValidate bool var isInternalLBAws bool var pxNamespace string +var storkVersion string +var testrailHostname string +var testrailUsername string +var testrailPassword string +var testrailSetupSuccessful bool func TestSnapshot(t *testing.T) { t.Run("testSnapshot", testSnapshot) @@ -248,13 +266,16 @@ func setup() error { if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("Unable to get stork version configmap: %v", err) } - if cm != nil && storkVersionCheck == true { + if cm != nil { ver, ok := cm.Data["version"] if !ok { return fmt.Errorf("stork version not found in configmap: %s", cmName) } - if getStorkVersion(ver) != getStorkVersion(version.Version) { - return fmt.Errorf("stork version mismatch, found: %s, expected: %s", getStorkVersion(ver), getStorkVersion(version.Version)) + storkVersion = getStorkVersion(ver) + if storkVersionCheck == true { + if getStorkVersion(ver) != getStorkVersion(version.Version) { + return fmt.Errorf("stork version mismatch, found: %s, expected: %s", getStorkVersion(ver), getStorkVersion(version.Version)) + } } } @@ -287,6 +308,8 @@ func setup() error { return fmt.Errorf("at the end of setup, setting kubeconfig to source failed in setup: %v", err) } + SetupTestRail() + return nil } @@ -1062,6 +1085,86 @@ func IsEks() bool { return false } +// SetupTestRail checks if the required parameters for testrail are passed, verifies connectivity and creates milestone if it does not exist +func SetupTestRail() { + if testrailutils.RunName = os.Getenv(testrailRunNameVar); testrailutils.RunName != "" { + logrus.Infof("Testrail Run name: %s", testrailutils.RunName) + } + if testrailutils.JobRunID = os.Getenv(testrailRunIDVar); testrailutils.JobRunID != "" { + logrus.Infof("Testrail Run ID: %s", testrailutils.JobRunID) + } + if 
testrailutils.MilestoneName = os.Getenv(testrailMilestoneVar); testrailutils.MilestoneName != "" {
+		logrus.Infof("Testrail Milestone: %s", testrailutils.MilestoneName)
+	}
+	if testrailutils.JenkinsBuildURL = os.Getenv(testrailJenkinsBuildURLVar); testrailutils.JenkinsBuildURL != "" {
+		logrus.Infof("Testrail Jenkins Build URL: %s", testrailutils.JenkinsBuildURL)
+	}
+	if testrailHostname = os.Getenv(testrailHostVar); testrailHostname != "" {
+		logrus.Infof("Testrail Host: %s", testrailHostname)
+	}
+	if testrailUsername = os.Getenv(testrailUserNameVar); testrailUsername != "" {
+		logrus.Infof("Testrail Username: %s", testrailUsername)
+	}
+	if testrailPassword = os.Getenv(testrailPasswordVar); testrailPassword != "" {
+		logrus.Infof("Testrail password is set")
+	}
+	if testrailHostname != "" && testrailUsername != "" && testrailPassword != "" {
+		err := testrailutils.Init(testrailHostname, testrailUsername, testrailPassword)
+		if err == nil {
+			if testrailutils.MilestoneName == "" || testrailutils.RunName == "" || testrailutils.JobRunID == "" {
+				err = fmt.Errorf("not all details provided to update testrail")
+				log.FailOnError(err, "Error occurred during testrail initialization")
+			}
+			testrailutils.CreateMilestone()
+			testrailSetupSuccessful = true
+			logrus.Infof("Testrail setup is successful, will log results to testrail automatically. Details:\nMilestone: %s, Testrun: %s",
+				testrailutils.MilestoneName, testrailutils.RunName)
+		}
+	} else {
+		logrus.Warn("Not all information to connect to testrail is provided, skipping updates to testrail")
+	}
+}
+
+func updateTestRail(testStatus *string, ids ...int) {
+	if ids[0] != 0 && ids[1] != 0 {
+		testrailObject := testrailutils.Testrail{
+			Status:        *testStatus,
+			TestID:        ids[0],
+			RunID:         ids[1],
+			DriverVersion: storkVersion,
+		}
+		testrailutils.AddTestEntry(testrailObject)
+		log.Infof("Testrail testrun url: %s/index.php?/runs/view/%d&group_by=cases:custom_automated&group_order=asc&group_id=%d", testrailHostname, ids[1], testrailutils.PwxProjectID)
+	} else {
+		logrus.Warnf("Skipping testrail update for this case, testID: %d, testrun: %d", ids[0], ids[1])
+	}
+}
+
+func testrailSetupForTest(testrailID int, testResult *string) int {
+	runID, err := addRunToMilestone(testrailID, testResult)
+	if err != nil {
+		logrus.Warnf("For current case: %d, not adding this run to testrail", testrailID)
+		return 0
+	}
+	return runID
+}
+
+func addRunToMilestone(testrailID int, testResult *string) (int, error) {
+	var runID int
+	var err error
+	if testrailutils.JobRunID != "" {
+		if testrailID == 0 {
+			return 0, fmt.Errorf("invalid testcase ID: %v", testrailID)
+		}
+		runID, err = strconv.Atoi(testrailutils.JobRunID)
+		if err != nil {
+			return 0, fmt.Errorf("invalid testrail run ID: %v", testrailutils.JobRunID)
+		}
+	}
+	runID = testrailutils.AddRunsToMilestone(testrailID)
+	return runID, nil
+}
+
 func TestMain(m *testing.M) {
 	flag.IntVar(&snapshotScaleCount,
 		"snapshot-scale-count",
diff --git a/test/integration_test/extender_test.go b/test/integration_test/extender_test.go
index 8a8587f05e..ef3b68d879 100644
--- a/test/integration_test/extender_test.go
+++ b/test/integration_test/extender_test.go
@@ -45,6 +45,10 @@ func TestExtender(t *testing.T) {
 }
 
 func noPVCTest(t *testing.T) {
+	var testrailID, testResult = 50785, testResultFail
+	runID := testrailSetupForTest(testrailID, &testResult)
+	defer updateTestRail(&testResult, testrailID, runID)
+
 	ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "nopvctest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-nopvc"}}) require.NoError(t, err, "Error scheduling task") @@ -54,9 +58,17 @@ func noPVCTest(t *testing.T) { require.NoError(t, err, "Error waiting for pod to get to running state") destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func singlePVCTest(t *testing.T) { + var testrailID, testResult = 50786, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "singlepvctest"), scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}}) require.NoError(t, err, "Error scheduling task") @@ -75,9 +87,17 @@ func singlePVCTest(t *testing.T) { verifyScheduledNode(t, scheduledNodes[0], volumeNames) destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func statefulsetTest(t *testing.T) { + var testrailID, testResult = 50787, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "sstest"), scheduler.ScheduleOptions{AppKeys: []string{"elasticsearch"}}) require.NoError(t, err, "Error scheduling task") @@ -104,9 +124,17 @@ func statefulsetTest(t *testing.T) { verifyScheduledNode(t, scheduledNodes[0], volumeNames) destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func multiplePVCTest(t *testing.T) { + var testrailID, testResult = 50788, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "multipvctest"), scheduler.ScheduleOptions{AppKeys: []string{"mysql-2-pvc"}}) require.NoError(t, err, "Error scheduling task") @@ -124,9 +152,17 @@ func multiplePVCTest(t *testing.T) { verifyScheduledNode(t, scheduledNodes[0], volumeNames) destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func driverNodeErrorTest(t *testing.T) { + var testrailID, testResult = 50789, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "drivererrtest"), scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}}) require.NoError(t, err, "Error scheduling task") @@ -173,9 +209,17 @@ func driverNodeErrorTest(t *testing.T) { require.NoError(t, err, "Error waiting for Node to start %+v", scheduledNodes[0]) destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func pvcOwnershipTest(t *testing.T) { + var testrailID, testResult = 50781, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "ownershiptest"), scheduler.ScheduleOptions{AppKeys: []string{"mysql-repl-1"}}) require.NoError(t, err, "Error scheduling task") 
@@ -246,9 +290,17 @@ func pvcOwnershipTest(t *testing.T) { require.NoError(t, err, "Volume driver is not up on Node %+v", scheduledNodes[0]) destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func antihyperconvergenceTest(t *testing.T) { + var testrailID, testResult = 85859, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule("antihyperconvergencetest", scheduler.ScheduleOptions{ AppKeys: []string{"test-sv4-svc-repl1"}, @@ -271,9 +323,17 @@ func antihyperconvergenceTest(t *testing.T) { verifyAntihyperconvergence(t, scheduledNodes, volumeNames) destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) { + var testrailID, testResult = 85860, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule("preferremoteonlytest", scheduler.ScheduleOptions{ AppKeys: []string{"test-sv4-svc-repl1-prefer-remote-only"}, @@ -326,6 +386,10 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) { err = core.Instance().UnCordonNode(schedNode.Name, defaultWaitTimeout, defaultWaitInterval) require.NoError(t, err, "Error uncordorning k8s node for stork test pod") } + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func verifyAntihyperconvergence(t *testing.T, appNodes []node.Node, volumes []string) { @@ -363,6 +427,10 @@ func verifyAntihyperconvergence(t *testing.T, appNodes []node.Node, volumes []st } func equalPodSpreadTest(t *testing.T) { + var testrailID, testResult = 84664, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule("equal-pod-spread-test", scheduler.ScheduleOptions{ AppKeys: []string{"postgres"}, @@ -394,4 +462,8 @@ func equalPodSpreadTest(t *testing.T) { logrus.Info("Deleting apps created by the test") destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } diff --git a/test/integration_test/health_monitor_test.go b/test/integration_test/health_monitor_test.go index 61dd502b2e..65339394ce 100644 --- a/test/integration_test/health_monitor_test.go +++ b/test/integration_test/health_monitor_test.go @@ -39,6 +39,10 @@ func TestHealthMonitor(t *testing.T) { } func stopDriverTest(t *testing.T) { + var testrailID, testResult = 50790, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "stopdrivertest"), scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}}) require.NoError(t, err, "Error scheduling task") @@ -85,9 +89,17 @@ func stopDriverTest(t *testing.T) { require.NoError(t, err, "Error waiting for Node to start %+v", scheduledNodes[0]) destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", 
t.Name(), testResult) } func stopKubeletTest(t *testing.T) { + var testrailID, testResult = 50791, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + // Cordon node where the test is running. This is so that we don't end up stopping // kubelet on the node where the stork-test pod is running testPodNode := "" @@ -136,9 +148,17 @@ func stopKubeletTest(t *testing.T) { destroyAndWait(t, ctxs) + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) + } func healthCheckFixTest(t *testing.T) { + var testrailID, testResult = 85900, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + // When a node's storage is offline stork should not bounce pods right away. // It now waits for a minute and checks again to see if the storage driver is still offline. // Bringing back node's storage within a minute should not affect anything @@ -217,9 +237,17 @@ func healthCheckFixTest(t *testing.T) { } destroyAndWait(t, ctxs) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func stopDriverCsiPodFailoverTest(t *testing.T) { + var testrailID, testResult = 85901, testResultFail + runID := testrailSetupForTest(testrailID, &testResult) + defer updateTestRail(&testResult, testrailID, runID) + // Verify CSI pods are running on online nodes logrus.Infof("Checking if CSI pods are initially scheduled on online PX nodes") verifyCsiPodsRunningOnOnlineNode(t) @@ -269,6 +297,10 @@ func stopDriverCsiPodFailoverTest(t *testing.T) { err = volumeDriver.StartDriver(nodeNameMap[nodeName]) require.NoError(t, err, "Error re-starting driver on Node %+v", nodeNameMap[podToFailover.Spec.NodeName]) + + // If we are here then the test has passed + testResult = testResultPass + logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult) } func verifyCsiPodsRunningOnOnlineNode(t *testing.T) { diff --git a/test/integration_test/stork-test-pod.yaml b/test/integration_test/stork-test-pod.yaml index a73390bf16..02c6b61087 100644 --- a/test/integration_test/stork-test-pod.yaml +++ b/test/integration_test/stork-test-pod.yaml @@ -96,6 +96,20 @@ spec: value: px_namespace - name: INTERNAL_AWS_LB value: internal_aws_lb + - name: TESTRAIL_RUN_NAME + value: testrail_run_name + - name: TESTRAIL_RUN_ID + value: testrail_run_id + - name: TESTRAIL_JENKINS_BUILD_URL + value: testrail_jenkins_build_url + - name: TESTRAIL_HOST + value: testrail_host + - name: TESTRAIL_USERNAME + value: testrail_uame + - name: TESTRAIL_PASSWORD + value: testrail_pwd + - name: TESTRAIL_MILESTONE + value: testrail_milestone hostNetwork: false hostNetwork: false hostPID: false diff --git a/test/integration_test/test-deploy.sh b/test/integration_test/test-deploy.sh index a44a554c3b..9bf7c8901c 100755 --- a/test/integration_test/test-deploy.sh +++ b/test/integration_test/test-deploy.sh @@ -177,6 +177,7 @@ case $i in px_namespace=$2 shift shift + ;; esac done @@ -304,6 +305,15 @@ sed -i 's/'password'/'"$SSH_PASSWORD"'/g' /testspecs/stork-test-pod.yaml sed -i 's|'openstorage/stork_test:.*'|'"$test_image_name"'|g' /testspecs/stork-test-pod.yaml sed -i 's/'backup_location_path'/'"$backup_location_path"'/g' /testspecs/stork-test-pod.yaml +# testrail params +sed -i 's/testrail_run_name/'"$TESTRAIL_RUN_NAME"'/g' 
/testspecs/stork-test-pod.yaml
+sed -i 's/testrail_run_id/'"\"$TESTRAIL_RUN_ID\""'/g' /testspecs/stork-test-pod.yaml
+sed -i 's|testrail_jenkins_build_url|'"$TESTRAIL_JENKINS_BUILD_URL"'|g' /testspecs/stork-test-pod.yaml
+sed -i 's|testrail_host|'"$TESTRAIL_HOST"'|g' /testspecs/stork-test-pod.yaml
+sed -i 's/testrail_uame/'"$TESTRAIL_USERNAME"'/g' /testspecs/stork-test-pod.yaml
+sed -i 's/testrail_pwd/'"$TESTRAIL_PASSWORD"'/g' /testspecs/stork-test-pod.yaml
+sed -i 's/testrail_milestone/'"$TESTRAIL_MILESTONE"'/g' /testspecs/stork-test-pod.yaml
+
 # Add AWS creds to stork-test pod
 sed -i 's/'aws_access_key_id'/'"$aws_id"'/g' /testspecs/stork-test-pod.yaml
 sed -i 's|'aws_secret_access_key'|'"$aws_key"'|g' /testspecs/stork-test-pod.yaml