changes for testrail auto-update
Signed-off-by: Rohit-PX <[email protected]>
Rohit-PX authored and Rohit-PX committed Apr 27, 2023
1 parent 5972223 commit a5e0f81
Showing 5 changed files with 234 additions and 3 deletions.
109 changes: 106 additions & 3 deletions test/integration_test/common_test.go
@@ -48,6 +48,8 @@ import (
_ "github.com/portworx/torpedo/drivers/volume/generic_csi"
_ "github.com/portworx/torpedo/drivers/volume/linstor"
_ "github.com/portworx/torpedo/drivers/volume/portworx"
"github.com/portworx/torpedo/pkg/log"
testrailutils "github.com/portworx/torpedo/pkg/testrailuttils"
"github.com/sirupsen/logrus"
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/stretchr/testify/require"
@@ -117,6 +119,17 @@ const (
tokenKey = "token"
clusterIP = "ip"
clusterPort = "port"

testResultPass = "Pass"
testResultFail = "Fail"

testrailRunNameVar = "TESTRAIL_RUN_NAME"
testrailRunIDVar = "TESTRAIL_RUN_ID"
testrailJenkinsBuildURLVar = "TESTRAIL_JENKINS_BUILD_URL"
testrailHostVar = "TESTRAIL_HOST"
testrailUserNameVar = "TESTRAIL_USERNAME"
testrailPasswordVar = "TESTRAIL_PASSWORD"
testrailMilestoneVar = "TESTRAIL_MILESTONE"
)

var nodeDriver node.Driver
@@ -140,6 +153,11 @@ var storkVersionCheck bool
var cloudDeletionValidate bool
var isInternalLBAws bool
var pxNamespace string
var storkVersion string
var testrailHostname string
var testrailUsername string
var testrailPassword string
var testrailSetupSuccessful bool

func TestSnapshot(t *testing.T) {
t.Run("testSnapshot", testSnapshot)
@@ -248,13 +266,16 @@ func setup() error {
if err != nil && !errors.IsNotFound(err) {
return fmt.Errorf("Unable to get stork version configmap: %v", err)
}
if cm != nil && storkVersionCheck == true {
if cm != nil {
ver, ok := cm.Data["version"]
if !ok {
return fmt.Errorf("stork version not found in configmap: %s", cmName)
}
if getStorkVersion(ver) != getStorkVersion(version.Version) {
return fmt.Errorf("stork version mismatch, found: %s, expected: %s", getStorkVersion(ver), getStorkVersion(version.Version))
storkVersion = getStorkVersion(ver)
if storkVersionCheck {
if getStorkVersion(ver) != getStorkVersion(version.Version) {
return fmt.Errorf("stork version mismatch, found: %s, expected: %s", getStorkVersion(ver), getStorkVersion(version.Version))
}
}
}

@@ -287,6 +308,8 @@ func setup() error {
return fmt.Errorf("at the end of setup, setting kubeconfig to source failed in setup: %v", err)
}

SetupTestRail()

return nil
}

@@ -1062,6 +1085,86 @@ func IsEks() bool {
return false
}

// SetupTestRail checks that the required testrail parameters are set, verifies connectivity, and creates the milestone if it does not exist
func SetupTestRail() {
if testrailutils.RunName = os.Getenv(testrailRunNameVar); testrailutils.RunName != "" {
logrus.Infof("Testrail Run name: %s", testrailutils.RunName)
}
if testrailutils.JobRunID = os.Getenv(testrailRunIDVar); testrailutils.JobRunID != "" {
logrus.Infof("Testrail Run ID: %s", testrailutils.JobRunID)
}
if testrailutils.MilestoneName = os.Getenv(testrailMilestoneVar); testrailutils.MilestoneName != "" {
logrus.Infof("Testrail Milestone %s", testrailutils.MilestoneName)
}
if testrailutils.JenkinsBuildURL = os.Getenv(testrailJenkinsBuildURLVar); testrailutils.JenkinsBuildURL != "" {
logrus.Infof("Testrail Jenkins Build URL: %s", testrailutils.JenkinsBuildURL)
}
if testrailHostname = os.Getenv(testrailHostVar); testrailHostname != "" {
logrus.Infof("Testrail Host: %s", testrailHostname)
}
if testrailUsername = os.Getenv(testrailUserNameVar); testrailUsername != "" {
logrus.Infof("Testrail Host: %s", testrailUsername)
}
if testrailPassword = os.Getenv(testrailPasswordVar); testrailPassword != "" {
logrus.Infof("Testrail run name: %s", testrailPassword)
}
if testrailHostname != "" && testrailUsername != "" && testrailPassword != "" {
err := testrailutils.Init(testrailHostname, testrailUsername, testrailPassword)
if err == nil {
if testrailutils.MilestoneName == "" || testrailutils.RunName == "" || testrailutils.JobRunID == "" {
err = fmt.Errorf("not all details provided to update testrail")
log.FailOnError(err, "Error occurred while testrail initialization")
}
testrailutils.CreateMilestone()
testrailSetupSuccessful = true
logrus.Infof("Testrail setup is successful, will log results to testrail automatically. Details:\nMilestone: %s, Testrun: %s",
testrailutils.MilestoneName, testrailutils.RunName)
} else {
logrus.Warnf("Testrail initialization failed, skipping updates to testrail: %v", err)
}
} else {
logrus.Warn("Not all information to connect to testrail is provided, skipping updates to testrail")
}
}
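
For reference, SetupTestRail is driven entirely by the environment variables declared in the constants block above and is invoked once from setup(). A minimal sketch of enabling reporting in a local run; every value below is illustrative, not a real endpoint, account, or run:

// Illustrative only: hypothetical host, credentials, milestone, and run details.
os.Setenv(testrailHostVar, "https://example.testrail.io")
os.Setenv(testrailUserNameVar, "ci-bot@example.com")
os.Setenv(testrailPasswordVar, "<api-key>")
os.Setenv(testrailMilestoneVar, "stork-nightly")
os.Setenv(testrailRunNameVar, "integration-2023-04-27")
os.Setenv(testrailRunIDVar, "1234")
os.Setenv(testrailJenkinsBuildURLVar, "https://jenkins.example.com/job/stork/42/")
SetupTestRail() // logs what it found and sets testrailSetupSuccessful on success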

// updateTestRail posts the result of a single test case to testrail.
// ids[0] is the testrail test case ID and ids[1] is the testrail run ID.
func updateTestRail(testStatus *string, ids ...int) {
if len(ids) >= 2 && ids[0] != 0 && ids[1] != 0 {
testrailObject := testrailutils.Testrail{
Status: *testStatus,
TestID: ids[0],
RunID: ids[1],
DriverVersion: storkVersion,
}
testrailutils.AddTestEntry(testrailObject)
log.Infof("Testrail testrun url: %s/index.php?/runs/view/%d&group_by=cases:custom_automated&group_order=asc&group_id=%d", testrailHostname, ids[1], testrailutils.PwxProjectID)
} else {
logrus.Warnf("Skipping testrail update for this case, ids: %v", ids)
}
}

// testrailSetupForTest adds the test case to the testrail milestone/run and
// returns the run ID to report against, or 0 when the run cannot be added.
func testrailSetupForTest(testrailID int, testResult *string) int {
runID, err := addRunToMilestone(testrailID, testResult)
if err != nil {
logrus.Warnf("For current case: %d, not adding this run to testrail", testrailID)
return 0
}
return runID
}

func addRunToMilestone(testrailID int, testResult *string) (int, error) {
if testrailutils.JobRunID != "" {
if testrailID == 0 {
return 0, fmt.Errorf("invalid testcase ID: %v", testrailID)
}
// Validate that the externally supplied run ID is numeric; the run ID
// returned to the caller comes from AddRunsToMilestone below.
if _, err := strconv.Atoi(testrailutils.JobRunID); err != nil {
return 0, fmt.Errorf("invalid testrail run ID: %v", testrailutils.JobRunID)
}
}
return testrailutils.AddRunsToMilestone(testrailID), nil
}
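
Every test case in the files below then follows the same three-step pattern built from these helpers: seed the result as Fail, register the run, and defer the upload so that a require.* failure anywhere in the body is still reported. A condensed sketch of the shape; case ID 12345 is a placeholder, not a real testrail case:

func exampleTest(t *testing.T) {
var testrailID, testResult = 12345, testResultFail // hypothetical case ID
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

// ... schedule apps, assert with require.*, clean up ...

// Only reached if nothing above aborted the test.
testResult = testResultPass
}

Because testResult is captured by pointer in the deferred call, flipping it on the last line is what converts the reported status from Fail to Pass.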

func TestMain(m *testing.M) {
flag.IntVar(&snapshotScaleCount,
"snapshot-scale-count",
72 changes: 72 additions & 0 deletions test/integration_test/extender_test.go
@@ -45,6 +45,10 @@ func TestExtender(t *testing.T) {
}

func noPVCTest(t *testing.T) {
var testrailID, testResult = 50785, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "nopvctest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-nopvc"}})
require.NoError(t, err, "Error scheduling task")
@@ -54,9 +58,17 @@ func noPVCTest(t *testing.T) {
require.NoError(t, err, "Error waiting for pod to get to running state")

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func singlePVCTest(t *testing.T) {
var testrailID, testResult = 50786, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "singlepvctest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}})
require.NoError(t, err, "Error scheduling task")
@@ -75,9 +87,17 @@ func singlePVCTest(t *testing.T) {
verifyScheduledNode(t, scheduledNodes[0], volumeNames)

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func statefulsetTest(t *testing.T) {
var testrailID, testResult = 50787, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "sstest"),
scheduler.ScheduleOptions{AppKeys: []string{"elasticsearch"}})
require.NoError(t, err, "Error scheduling task")
@@ -104,9 +124,17 @@ func statefulsetTest(t *testing.T) {
verifyScheduledNode(t, scheduledNodes[0], volumeNames)

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func multiplePVCTest(t *testing.T) {
var testrailID, testResult = 50788, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "multipvctest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-2-pvc"}})
require.NoError(t, err, "Error scheduling task")
@@ -124,9 +152,17 @@ func multiplePVCTest(t *testing.T) {

verifyScheduledNode(t, scheduledNodes[0], volumeNames)
destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func driverNodeErrorTest(t *testing.T) {
var testrailID, testResult = 50789, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "drivererrtest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}})
require.NoError(t, err, "Error scheduling task")
@@ -173,9 +209,17 @@ func driverNodeErrorTest(t *testing.T) {
require.NoError(t, err, "Error waiting for Node to start %+v", scheduledNodes[0])

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func pvcOwnershipTest(t *testing.T) {
var testrailID, testResult = 50781, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "ownershiptest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-repl-1"}})
require.NoError(t, err, "Error scheduling task")
@@ -246,9 +290,17 @@ func pvcOwnershipTest(t *testing.T) {
require.NoError(t, err, "Volume driver is not up on Node %+v", scheduledNodes[0])

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func antihyperconvergenceTest(t *testing.T) {
var testrailID, testResult = 85859, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule("antihyperconvergencetest",
scheduler.ScheduleOptions{
AppKeys: []string{"test-sv4-svc-repl1"},
@@ -271,9 +323,17 @@ func antihyperconvergenceTest(t *testing.T) {
verifyAntihyperconvergence(t, scheduledNodes, volumeNames)

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
var testrailID, testResult = 85860, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule("preferremoteonlytest",
scheduler.ScheduleOptions{
AppKeys: []string{"test-sv4-svc-repl1-prefer-remote-only"},
@@ -326,6 +386,10 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) {
err = core.Instance().UnCordonNode(schedNode.Name, defaultWaitTimeout, defaultWaitInterval)
require.NoError(t, err, "Error uncordorning k8s node for stork test pod")
}

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func verifyAntihyperconvergence(t *testing.T, appNodes []node.Node, volumes []string) {
@@ -363,6 +427,10 @@ func verifyAntihyperconvergence(t *testing.T, appNodes []node.Node, volumes []string) {
}

func equalPodSpreadTest(t *testing.T) {
var testrailID, testResult = 84664, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule("equal-pod-spread-test",
scheduler.ScheduleOptions{
AppKeys: []string{"postgres"},
@@ -394,4 +462,8 @@

logrus.Info("Deleting apps created by the test")
destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}
32 changes: 32 additions & 0 deletions test/integration_test/health_monitor_test.go
@@ -39,6 +39,10 @@ func TestHealthMonitor(t *testing.T) {
}

func stopDriverTest(t *testing.T) {
var testrailID, testResult = 50790, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "stopdrivertest"),
scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}})
require.NoError(t, err, "Error scheduling task")
@@ -85,9 +89,17 @@ func stopDriverTest(t *testing.T) {
require.NoError(t, err, "Error waiting for Node to start %+v", scheduledNodes[0])

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func stopKubeletTest(t *testing.T) {
var testrailID, testResult = 50791, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

// Cordon node where the test is running. This is so that we don't end up stopping
// kubelet on the node where the stork-test pod is running
testPodNode := ""
@@ -136,9 +148,17 @@ func stopKubeletTest(t *testing.T) {

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)

}

func healthCheckFixTest(t *testing.T) {
var testrailID, testResult = 85900, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

// When a node's storage is offline stork should not bounce pods right away.
// It now waits for a minute and checks again to see if the storage driver is still offline.
// Bringing back node's storage within a minute should not affect anything
@@ -217,9 +237,17 @@ func healthCheckFixTest(t *testing.T) {
}

destroyAndWait(t, ctxs)

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func stopDriverCsiPodFailoverTest(t *testing.T) {
var testrailID, testResult = 85901, testResultFail
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

// Verify CSI pods are running on online nodes
logrus.Infof("Checking if CSI pods are initially scheduled on online PX nodes")
verifyCsiPodsRunningOnOnlineNode(t)
@@ -269,6 +297,10 @@ func stopDriverCsiPodFailoverTest(t *testing.T) {

err = volumeDriver.StartDriver(nodeNameMap[nodeName])
require.NoError(t, err, "Error re-starting driver on Node %+v", nodeNameMap[podToFailover.Spec.NodeName])

// If we are here then the test has passed
testResult = testResultPass
logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
}

func verifyCsiPodsRunningOnOnlineNode(t *testing.T) {
