diff --git a/tests/internals/pause_scaledjob/pause_scaledjob_test.go b/tests/internals/pause_scaledjob/pause_scaledjob_test.go
index 4a3f77ba3bc..d9445ce7fe8 100644
--- a/tests/internals/pause_scaledjob/pause_scaledjob_test.go
+++ b/tests/internals/pause_scaledjob/pause_scaledjob_test.go
@@ -30,9 +30,9 @@ var (
 	scalerName            = fmt.Sprintf("%s-scaler", testName)
 	scaledJobName         = fmt.Sprintf("%s-sj", testName)
 	minReplicaCount       = 0
-	maxReplicaCount       = 3
-	iterationCountInitial = 15
-	iterationCountLatter  = 30
+	maxReplicaCount       = 1
+	iterationCountInitial = 30
+	iterationCountLatter  = 60
 )
 
 type templateData struct {
@@ -105,7 +105,7 @@ spec:
         image: busybox
         command:
         - sleep
-        - "15"
+        - "30"
         imagePullPolicy: IfNotPresent
       restartPolicy: Never
   backoffLimit: 1
@@ -119,10 +119,32 @@ spec:
 )
 
 // Util function
-func WaitForJobByFilterCountUntilIteration(t *testing.T, kc *kubernetes.Clientset, namespace string,
-	target, iterations, intervalSeconds int, listOptions metav1.ListOptions) bool {
-	var isTargetAchieved = false
+func WaitUntilJobIsRunning(t *testing.T, kc *kubernetes.Clientset, namespace string,
+	target, iterations, intervalSeconds int) bool {
+	listOptions := metav1.ListOptions{
+		FieldSelector: "status.successful=0",
+	}
+	for i := 0; i < iterations; i++ {
+		jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), listOptions)
+		count := len(jobList.Items)
+		t.Logf("Waiting for job count to hit target. Namespace - %s, Current - %d, Target - %d",
+			namespace, count, target)
+
+		if count == target {
+			return true
+		}
+		time.Sleep(time.Duration(intervalSeconds) * time.Second)
+	}
+
+	return false
+}
+
+func WaitUntilJobIsSucceeded(t *testing.T, kc *kubernetes.Clientset, namespace string,
+	target, iterations, intervalSeconds int) bool {
+	listOptions := metav1.ListOptions{
+		FieldSelector: "status.successful=1",
+	}
 	for i := 0; i < iterations; i++ {
 		jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), listOptions)
 		count := len(jobList.Items)
@@ -131,15 +153,33 @@ func WaitForJobByFilterCountUntilIteration(t *testing.T, kc *kubernetes.Clientse
 			namespace, count, target)
 
 		if count == target {
-			isTargetAchieved = true
-		} else {
-			isTargetAchieved = false
+			return true
 		}
+		time.Sleep(time.Duration(intervalSeconds) * time.Second)
+	}
+
+	return false
+}
+
+func AssertJobNotChangeKeepingIsSucceeded(t *testing.T, kc *kubernetes.Clientset, namespace string,
+	target, iterations, intervalSeconds int) bool {
+	listOptions := metav1.ListOptions{
+		FieldSelector: "status.successful=1",
+	}
+	for i := 0; i < iterations; i++ {
+		jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), listOptions)
+		count := len(jobList.Items)
+		t.Logf("Asserting the job count doesn't change. Namespace - %s, Current - %d, Target - %d",
+			namespace, count, target)
+
+		if count != target {
+			return false
+		}
 		time.Sleep(time.Duration(intervalSeconds) * time.Second)
 	}
 
-	return isTargetAchieved
+	return true
 }
 
 func TestScaler(t *testing.T) {
@@ -152,21 +192,22 @@ func TestScaler(t *testing.T) {
 
 	data, templates := getTemplateData(metricValue)
 
-	listOptions := metav1.ListOptions{
-		FieldSelector: "status.successful=0",
-	}
-
 	CreateKubernetesResources(t, kc, testNamespace, data, templates)
 
-	assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, data.MetricThreshold, iterationCountInitial, 1, listOptions),
+	// we ensure that the gRPC server is up and ready
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, scalerName, testNamespace, 1, 60, 1),
+		"replica count should be 1 after 1 minute")
+
+	// we ensure that there is a job running
+	assert.True(t, WaitUntilJobIsRunning(t, kc, testNamespace, data.MetricThreshold, iterationCountInitial, 1),
 		"job count should be %d after %d iterations", data.MetricThreshold, iterationCountInitial)
 
 	// test scaling
-	testPause(t, kc, listOptions)
-	testUnpause(t, kc, data, listOptions)
+	testPause(t, kc)
+	testUnpause(t, kc, data)
 
-	testPause(t, kc, listOptions)
-	testUnpauseWithBool(t, kc, data, listOptions)
+	testPause(t, kc)
+	testUnpauseWithBool(t, kc, data)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
@@ -189,7 +230,7 @@ func getTemplateData(metricValue int) (templateData, []Template) {
 	}
 }
 
-func testPause(t *testing.T, kc *kubernetes.Clientset, listOptions metav1.ListOptions) {
+func testPause(t *testing.T, kc *kubernetes.Clientset) {
 	t.Log("--- testing Paused annotation ---")
 
 	_, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledjob %s autoscaling.keda.sh/paused=true --namespace %s",
 		scaledJobName, testNamespace))
@@ -197,12 +238,15 @@ func testPause(t *testing.T, kc *kubernetes.Clientset, listOptions metav1.ListOp
 
 	t.Log("job count does not change as job is paused")
 
-	expectedTarget := 0
-	assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions),
+	expectedTarget := 1
+	assert.True(t, WaitUntilJobIsSucceeded(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1),
 		"job count should be %d after %d iterations", expectedTarget, iterationCountLatter)
+
+	assert.True(t, AssertJobNotChangeKeepingIsSucceeded(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1),
+		"job count should be %d during %d iterations", expectedTarget, iterationCountLatter)
 }
 
-func testUnpause(t *testing.T, kc *kubernetes.Clientset, data templateData, listOptions metav1.ListOptions) {
+func testUnpause(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing removing Paused annotation ---")
 
 	_, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledjob %s autoscaling.keda.sh/paused- --namespace %s",
 		scaledJobName, testNamespace))
@@ -211,11 +255,11 @@ func testUnpause(t *testing.T, kc *kubernetes.Clientset, data templateData, list
 
 	t.Log("job count increases from zero as job is no longer paused")
 
 	expectedTarget := data.MetricThreshold
-	assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions),
+	assert.True(t, WaitUntilJobIsRunning(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1),
 		"job count should be %d after %d iterations", expectedTarget, iterationCountLatter)
 }
 
-func testUnpauseWithBool(t *testing.T, kc
*kubernetes.Clientset, data templateData, listOptions metav1.ListOptions) { +func testUnpauseWithBool(t *testing.T, kc *kubernetes.Clientset, data templateData) { t.Log("--- test setting Paused annotation to false ---") _, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledjob %s autoscaling.keda.sh/paused=false --namespace %s --overwrite=true", scaledJobName, testNamespace)) @@ -224,6 +268,6 @@ func testUnpauseWithBool(t *testing.T, kc *kubernetes.Clientset, data templateDa t.Log("job count increases from zero as job is no longer paused") expectedTarget := data.MetricThreshold - assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions), + assert.True(t, WaitUntilJobIsRunning(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1), "job count should be %d after %d iterations", expectedTarget, iterationCountLatter) } diff --git a/tests/internals/scaling_modifiers/scaling_modifiers_test.go b/tests/internals/scaling_modifiers/scaling_modifiers_test.go index 7e9c3817732..ad8f472885c 100644 --- a/tests/internals/scaling_modifiers/scaling_modifiers_test.go +++ b/tests/internals/scaling_modifiers/scaling_modifiers_test.go @@ -123,6 +123,10 @@ spec: - secretRef: name: {{.SecretName}} imagePullPolicy: Always + readinessProbe: + httpGet: + path: /api/value + port: 8080 ` soFallbackTemplate = ` apiVersion: keda.sh/v1alpha1 @@ -271,6 +275,10 @@ func TestScalingModifiers(t *testing.T) { data, templates := getTemplateData() CreateKubernetesResources(t, kc, namespace, data, templates) + // we ensure that the metrics api server is up and ready + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, metricsServerDeploymentName, namespace, 1, 60, 2), + "replica count should be 1 after 1 minute") + testFormula(t, kc, data) templates = append(templates, Template{Name: "soComplexFormula", Config: soComplexFormula}) @@ -309,6 +317,10 @@ func testFormula(t *testing.T, kc *kubernetes.Clientset, data templateData) { _, err = ExecuteCommand(fmt.Sprintf("kubectl scale deployment/%s --replicas=1 -n %s", metricsServerDeploymentName, namespace)) assert.NoErrorf(t, err, "cannot scale metricsServer deployment - %s", err) + // we ensure that the metrics api server is up and ready + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, metricsServerDeploymentName, namespace, 1, 60, 2), + "replica count should be 1 after 1 minute") + data.MetricValue = 2 KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate) // 2+2=4; target = 2 -> 4/2 replicas should be 2 diff --git a/tests/internals/subresource_scale/subresource_scale_test.go b/tests/internals/subresource_scale/subresource_scale_test.go index 79571e47e30..a79d7df89d6 100644 --- a/tests/internals/subresource_scale/subresource_scale_test.go +++ b/tests/internals/subresource_scale/subresource_scale_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -117,7 +118,11 @@ func TestScaler(t *testing.T) { // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() - + t.Cleanup(func() { + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + cleanupArgo(t) + }) setupArgo(t, kc) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -127,10 +132,6 @@ func TestScaler(t *testing.T) { // test scaling testScaleOut(t, kc) testScaleIn(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) - cleanupArgo(t) } func setupArgo(t *testing.T, kc *kubernetes.Clientset) { @@ -139,7 +140,7 @@ func setupArgo(t *testing.T, kc *kubernetes.Clientset) { argoNamespace) _, err := ExecuteCommand(cmdWithNamespace) - assert.NoErrorf(t, err, "cannot install argo resources - %s", err) + require.NoErrorf(t, err, "cannot install argo resources - %s", err) } func cleanupArgo(t *testing.T) { diff --git a/tests/scalers/activemq/activemq_test.go b/tests/scalers/activemq/activemq_test.go index 8007a76eed9..668624c33eb 100644 --- a/tests/scalers/activemq/activemq_test.go +++ b/tests/scalers/activemq/activemq_test.go @@ -13,6 +13,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . "github.com/kedacore/keda/v2/tests/helper" @@ -446,9 +447,13 @@ spec: ) func TestActiveMQScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources CreateKubernetesResources(t, kc, testNamespace, data, templates) // setup activemq @@ -461,16 +466,13 @@ func TestActiveMQScaler(t *testing.T) { testActivation(t, kc) testScaleOut(t, kc) testScaleIn(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func setupActiveMQ(t *testing.T, kc *kubernetes.Clientset) { - assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "activemq", testNamespace, 1, 60, 3), + require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "activemq", testNamespace, 1, 60, 3), "activemq should be up") err := checkIfActiveMQStatusIsReady(t, activemqPodName) - assert.NoErrorf(t, err, "%s", err) + require.NoErrorf(t, err, "%s", err) } func checkIfActiveMQStatusIsReady(t *testing.T, name string) error { diff --git a/tests/scalers/apache_kafka/apache_kafka_test.go b/tests/scalers/apache_kafka/apache_kafka_test.go index ccea3387062..05de962a9a6 100644 --- a/tests/scalers/apache_kafka/apache_kafka_test.go +++ b/tests/scalers/apache_kafka/apache_kafka_test.go @@ -10,6 +10,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -386,10 +387,13 @@ spec: func TestScaler(t *testing.T) { // setup - t.Log("--- setting up ---") - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources CreateKubernetesResources(t, kc, testNamespace, data, templates) addCluster(t, data) addTopic(t, data, topic1, topicPartitions) @@ -407,7 +411,6 @@ func TestScaler(t *testing.T) { testOneOnInvalidOffset(t, kc, data) testPersistentLag(t, kc, data) testScalingOnlyPartitionsWithLag(t, kc, data) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -667,7 +670,7 @@ func addTopic(t *testing.T, data templateData, name string, partitions int) { data.KafkaTopicPartitions = partitions KubectlApplyWithTemplate(t, data, "kafkaTopicTemplate", kafkaTopicTemplate) _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=480s --namespace %s", name, testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) t.Log("--- kafka topic added ---") } @@ -675,7 +678,7 @@ func addCluster(t *testing.T, data templateData) { t.Log("--- adding kafka cluster ---") KubectlApplyWithTemplate(t, data, "kafkaClusterTemplate", kafkaClusterTemplate) _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=480s --namespace %s", kafkaName, testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) t.Log("--- kafka cluster added ---") } diff --git a/tests/scalers/arangodb/arangodb_test.go b/tests/scalers/arangodb/arangodb_test.go index b62cfd129e4..93da215c829 100644 --- a/tests/scalers/arangodb/arangodb_test.go +++ b/tests/scalers/arangodb/arangodb_test.go @@ -201,14 +201,17 @@ spec: ) func TestArangoDBScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) - + data, templates := getTemplateData() CreateNamespace(t, kc, testNamespace) + t.Cleanup(func() { + arangodb.UninstallArangoDB(t, testNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources arangodb.InstallArangoDB(t, kc, testNamespace) arangodb.SetupArangoDB(t, kc, testNamespace, arangoDBName, arangoDBCollection) - - data, templates := getTemplateData() KubectlApplyMultipleWithTemplate(t, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), @@ -217,13 +220,6 @@ func TestArangoDBScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc, data) - - // cleanup - KubectlDeleteMultipleWithTemplate(t, data, templates) - arangodb.UninstallArangoDB(t, testNamespace) - - DeleteNamespace(t, testNamespace) - WaitForNamespaceDeletion(t, testNamespace) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/arangodb/helper.go b/tests/scalers/arangodb/helper.go index 0f3bfdd13a3..c97536cb992 100644 --- a/tests/scalers/arangodb/helper.go +++ b/tests/scalers/arangodb/helper.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" 
"github.com/kedacore/keda/v2/tests/helper" @@ -87,24 +88,24 @@ spec: func InstallArangoDB(t *testing.T, kc *kubernetes.Clientset, testNamespace string) { t.Log("installing arangodb crds") _, err := helper.ExecuteCommand(fmt.Sprintf("helm install arangodb-crds https://github.com/arangodb/kube-arangodb/releases/download/1.2.20/kube-arangodb-crd-1.2.20.tgz --namespace=%s --wait", testNamespace)) - assert.NoErrorf(t, err, "cannot install crds - %s", err) + require.NoErrorf(t, err, "cannot install crds - %s", err) t.Log("installing arangodb operator") _, err = helper.ExecuteCommand(fmt.Sprintf("helm install arangodb https://github.com/arangodb/kube-arangodb/releases/download/1.2.20/kube-arangodb-1.2.20.tgz --set 'operator.architectures={arm64,amd64}' --set 'operator.resources.requests.cpu=1m' --set 'operator.resources.requests.memory=1Mi' --namespace=%s --wait", testNamespace)) - assert.NoErrorf(t, err, "cannot create operator deployment - %s", err) + require.NoErrorf(t, err, "cannot create operator deployment - %s", err) t.Log("creating arangodeployment resource") helper.KubectlApplyWithTemplate(t, templateData{Namespace: testNamespace}, "arangoDeploymentTemplate", arangoDeploymentTemplate) - assert.True(t, helper.WaitForPodCountInNamespace(t, kc, testNamespace, 3, 5, 20), "pod count should be 3") - assert.True(t, helper.WaitForAllPodRunningInNamespace(t, kc, testNamespace, 5, 20), "all pods should be running") + require.True(t, helper.WaitForPodCountInNamespace(t, kc, testNamespace, 3, 5, 20), "pod count should be 3") + require.True(t, helper.WaitForAllPodRunningInNamespace(t, kc, testNamespace, 5, 20), "all pods should be running") } func SetupArangoDB(t *testing.T, kc *kubernetes.Clientset, testNamespace, arangoDBName, arangoDBCollection string) { helper.KubectlApplyWithTemplate(t, templateData{Namespace: testNamespace, Database: arangoDBName}, "createDatabaseTemplate", createDatabaseTemplate) - assert.True(t, helper.WaitForJobSuccess(t, kc, "create-db", testNamespace, 5, 10), "create database job failed") + require.True(t, helper.WaitForJobSuccess(t, kc, "create-db", testNamespace, 5, 10), "create database job failed") helper.KubectlApplyWithTemplate(t, templateData{Namespace: testNamespace, Database: arangoDBName, Collection: arangoDBCollection}, "createCollectionTemplate", createCollectionTemplate) - assert.True(t, helper.WaitForJobSuccess(t, kc, "create-arangodb-collection", testNamespace, 5, 10), "create collection job failed") + require.True(t, helper.WaitForJobSuccess(t, kc, "create-arangodb-collection", testNamespace, 5, 10), "create collection job failed") } func UninstallArangoDB(t *testing.T, namespace string) { diff --git a/tests/scalers/cassandra/cassandra_test.go b/tests/scalers/cassandra/cassandra_test.go index 05740b15f7d..748faad028a 100644 --- a/tests/scalers/cassandra/cassandra_test.go +++ b/tests/scalers/cassandra/cassandra_test.go @@ -12,6 +12,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -173,6 +174,11 @@ func TestCassandraScaler(t *testing.T) { kc := GetKubernetesClient(t) data, templates := getTemplateData() CreateNamespace(t, kc, testNamespace) + // cleanup + t.Cleanup(func() { + uninstallCassandra(t) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // setup cassandra installCassandra(t) @@ -188,19 +194,15 @@ func TestCassandraScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc, data) - - // cleanup - uninstallCassandra(t) - DeleteKubernetesResources(t, testNamespace, data, templates) } func installCassandra(t *testing.T) { _, err := ExecuteCommand("helm repo add bitnami https://charts.bitnami.com/bitnami") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) - _, err = ExecuteCommand(fmt.Sprintf("helm install cassandra --set persistence.enabled=false --set dbUser.user=%s --set dbUser.password=%s --namespace %s bitnami/cassandra --wait", cassandraUsername, cassandraPassword, testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) + _, err = ExecuteCommand(fmt.Sprintf("helm install cassandra --set resourcesPreset=none --set persistence.enabled=false --set dbUser.user=%s --set dbUser.password=%s --namespace %s bitnami/cassandra --wait", cassandraUsername, cassandraPassword, testNamespace)) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func uninstallCassandra(t *testing.T) { @@ -212,12 +214,12 @@ func setupCassandra(t *testing.T, kc *kubernetes.Clientset, data templateData) { // Create the key space data.Command = fmt.Sprintf("cqlsh -u %s -p %s cassandra.%s --execute=\\\"%s\\\"", cassandraUsername, cassandraPassword, testNamespace, createKeyspace) KubectlApplyWithTemplate(t, data, "jobTemplate", jobTemplate) - assert.True(t, WaitForJobSuccess(t, kc, "client", testNamespace, 6, 10), "create database job failed") + require.True(t, WaitForJobSuccess(t, kc, "client", testNamespace, 6, 10), "create database job failed") KubectlDeleteWithTemplate(t, data, "jobTemplate", jobTemplate) // Create the table data.Command = fmt.Sprintf("cqlsh -u %s -p %s cassandra.%s --execute=\\\"%s\\\"", cassandraUsername, cassandraPassword, testNamespace, createTableCQL) KubectlApplyWithTemplate(t, data, "jobTemplate", jobTemplate) - assert.True(t, WaitForJobSuccess(t, kc, "client", testNamespace, 6, 10), "create database job failed") + require.True(t, WaitForJobSuccess(t, kc, "client", testNamespace, 6, 10), "create database job failed") KubectlDeleteWithTemplate(t, data, "jobTemplate", jobTemplate) t.Log("--- cassandra is ready ---") diff --git a/tests/scalers/couchdb/couchdb_test.go b/tests/scalers/couchdb/couchdb_test.go index b957e72dfe0..1e13e508045 100644 --- a/tests/scalers/couchdb/couchdb_test.go +++ b/tests/scalers/couchdb/couchdb_test.go @@ -14,6 +14,7 @@ import ( "github.com/google/uuid" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -151,20 +152,25 @@ spec: ) func TestCouchDBScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) + t.Cleanup(func() { + data, templates := getTemplateData(t, kc) + 
DeleteKubernetesResources(t, testNamespace, data, templates) + }) + // setup couchdb CreateNamespace(t, kc, testNamespace) installCouchDB(t) - data, templates := getTemplateData(kc) + // Create kubernetes resources + data, templates := getTemplateData(t, kc) KubectlApplyMultipleWithTemplate(t, data, templates) // wait until client is ready time.Sleep(10 * time.Second) // create database - _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals", getPassword(kc), testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals", getPassword(t, kc), testNamespace)) + require.NoErrorf(t, err, "cannot execute command - %s", err) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -173,23 +179,21 @@ func TestCouchDBScaler(t *testing.T) { testActivation(t, kc) testScaleUp(t, kc) testScaleDown(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func installCouchDB(t *testing.T) { _, err := ExecuteCommand(fmt.Sprintf("helm repo add couchdb %s", couchdbHelmRepo)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) uuid := strings.ReplaceAll(uuid.New().String(), "-", "") _, err = ExecuteCommand(fmt.Sprintf("helm install test-release --set couchdbConfig.couchdb.uuid=%s --namespace %s couchdb/couchdb --wait", uuid, testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } -func getPassword(kc *kubernetes.Clientset) string { - secret, _ := kc.CoreV1().Secrets(testNamespace).Get(context.Background(), "test-release-couchdb", metav1.GetOptions{}) +func getPassword(t *testing.T, kc *kubernetes.Clientset) string { + secret, err := kc.CoreV1().Secrets(testNamespace).Get(context.Background(), "test-release-couchdb", metav1.GetOptions{}) + require.NoError(t, err) encodedPassword := secret.Data["adminPassword"] password := string(encodedPassword) return password @@ -202,7 +206,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset) { "feet":4, "greeting":"moo" }` - _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals/001 -d '%s'", getPassword(kc), testNamespace, record)) + _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals/001 -d '%s'", getPassword(t, kc), testNamespace, record)) assert.NoErrorf(t, err, "cannot execute command - %s", err) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) @@ -215,7 +219,7 @@ func testScaleUp(t *testing.T, kc *kubernetes.Clientset) { "feet":4, "greeting":"meow" }` - _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT 
http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals/002 -d '%s'", getPassword(kc), testNamespace, record)) + _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals/002 -d '%s'", getPassword(t, kc), testNamespace, record)) assert.NoErrorf(t, err, "cannot execute command - %s", err) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 2), "replica count should be %d after 2 minute", maxReplicaCount) @@ -225,17 +229,17 @@ func testScaleDown(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale down ---") // recreate database to clear it - _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X DELETE http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals", getPassword(kc), testNamespace)) + _, _, err := ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X DELETE http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals", getPassword(t, kc), testNamespace)) assert.NoErrorf(t, err, "cannot execute command - %s", err) - _, _, err = ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals", getPassword(kc), testNamespace)) + _, _, err = ExecCommandOnSpecificPod(t, clientName, testNamespace, fmt.Sprintf("curl -X PUT http://admin:%s@test-release-svc-couchdb.%s.svc.cluster.local:5984/animals", getPassword(t, kc), testNamespace)) assert.NoErrorf(t, err, "cannot execute command - %s", err) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 2), "replica count should be %d after 2 minutes", minReplicaCount) } -func getTemplateData(kc *kubernetes.Clientset) (templateData, []Template) { - password := getPassword(kc) +func getTemplateData(t *testing.T, kc *kubernetes.Clientset) (templateData, []Template) { + password := getPassword(t, kc) passwordEncoded := base64.StdEncoding.EncodeToString([]byte(password)) connectionString := fmt.Sprintf("http://test-release-svc-couchdb.%s.svc.cluster.local:5984", testNamespace) hostName := fmt.Sprintf("test-release-svc-couchdb.%s.svc.cluster.local", testNamespace) diff --git a/tests/scalers/datadog/datadog_test.go b/tests/scalers/datadog/datadog_test.go index 0d015507749..99c46dc2366 100644 --- a/tests/scalers/datadog/datadog_test.go +++ b/tests/scalers/datadog/datadog_test.go @@ -252,12 +252,20 @@ func TestDatadogScaler(t *testing.T) { require.NotEmpty(t, datadogAppKey, "DATADOG_APP_KEY env variable is required for datadog tests") require.NotEmpty(t, datadogAPIKey, "DATADOG_API_KEY env variable is required for datadog tests") require.NotEmpty(t, datadogSite, "DATADOG_SITE env variable is required for datadog tests") - // Create kubernetes resources + kc := GetKubernetesClient(t) data, templates := getTemplateData() - CreateKubernetesResources(t, kc, testNamespace, data, templates) + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // install datadog + CreateNamespace(t, kc, testNamespace) installDatadog(t) + // Create kubernetes resources + KubectlApplyMultipleWithTemplate(t, data, templates) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -265,9 
+273,6 @@ func TestDatadogScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc, data) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -295,9 +300,9 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { func installDatadog(t *testing.T) { _, err := ExecuteCommand(fmt.Sprintf("helm repo add datadog %s", datadogHelmRepo)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set datadog.apiKey=%s --set datadog.appKey=%s --set datadog.site=%s --set datadog.clusterName=%s --set datadog.kubelet.tlsVerify=false --namespace %s --wait %s datadog/datadog`, datadogAPIKey, datadogAppKey, @@ -305,7 +310,7 @@ func installDatadog(t *testing.T) { kuberneteClusterName, testNamespace, testName)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/elasticsearch/elasticsearch_test.go b/tests/scalers/elasticsearch/elasticsearch_test.go index 0c4dea9503a..de2d997066c 100644 --- a/tests/scalers/elasticsearch/elasticsearch_test.go +++ b/tests/scalers/elasticsearch/elasticsearch_test.go @@ -13,6 +13,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -294,9 +295,13 @@ spec: ) func TestElasticsearchScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources CreateKubernetesResources(t, kc, testNamespace, data, templates) // setup elastic @@ -309,19 +314,16 @@ func TestElasticsearchScaler(t *testing.T) { testActivation(t, kc) testScaleOut(t, kc) testScaleIn(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func setupElasticsearch(t *testing.T, kc *kubernetes.Clientset) { - assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "elasticsearch", testNamespace, 1, 60, 3), + require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "elasticsearch", testNamespace, 1, 60, 3), "elasticsearch should be up") // Create the index and the search template _, err := ExecuteCommand(fmt.Sprintf("%s -XPUT http://localhost:9200/%s -d '%s'", kubectlElasticExecCmd, indexName, elasticsearchCreateIndex)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand(fmt.Sprintf("%s -XPUT http://localhost:9200/_scripts/%s -d '%s'", kubectlElasticExecCmd, searchTemplateName, elasticsearchSearchTemplate)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func testActivation(t *testing.T, kc *kubernetes.Clientset) { diff --git a/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go b/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go index 94f85393de6..bf86aa3a304 100644 --- a/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go +++ b/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go @@ -8,10 +8,10 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" - etcd "github.com/kedacore/keda/v2/tests/scalers/etcd/helper" ) const ( @@ -23,6 +23,8 @@ var ( scaledObjectName = fmt.Sprintf("%s-so", testName) deploymentName = fmt.Sprintf("%s-deployment", testName) jobName = fmt.Sprintf("%s-job", testName) + etcdClientName = fmt.Sprintf("%s-client", testName) + etcdEndpoints = fmt.Sprintf("etcd-0.etcd-headless.%s:2379,etcd-1.%s:2379,etcd-2.etcd-headless.%s:2379", testNamespace, testNamespace, testNamespace) minReplicaCount = 0 maxReplicaCount = 2 ) @@ -35,7 +37,8 @@ type templateData struct { MinReplicaCount int MaxReplicaCount int EtcdName string - Value int + EtcdClientName string + EtcdEndpoints string } const ( @@ -82,31 +85,26 @@ spec: triggers: - type: etcd metadata: - endpoints: {{.EtcdName}}-0.etcd-headless.{{.TestNamespace}}:2379,{{.EtcdName}}-1.etcd-headless.{{.TestNamespace}}:2379,{{.EtcdName}}-2.etcd-headless.{{.TestNamespace}}:2379 + endpoints: {{.EtcdEndpoints}} watchKey: var value: '1.5' activationValue: '5' watchProgressNotifyInterval: '10' ` - setJobTemplate = `apiVersion: batch/v1 -kind: Job + etcdClientTemplate = ` +apiVersion: v1 +kind: Pod metadata: - name: {{.JobName}} + name: {{.EtcdClientName}} namespace: {{.TestNamespace}} spec: - template: - spec: - containers: - - name: etcd - image: gcr.io/etcd-development/etcd:v3.4.20 - imagePullPolicy: IfNotPresent - command: - - sh - - -c - - "/usr/local/bin/etcdctl put var {{.Value}} --endpoints=http://{{.EtcdName}}-0.etcd-headless.{{.TestNamespace}}:2380,http://{{.EtcdName}}-1.etcd-headless.{{.TestNamespace}}:2380,http://{{.EtcdName}}-2.etcd-headless.{{.TestNamespace}}:2380" - restartPolicy: Never - backoffLimit: 4 -` + containers: + - name: {{.EtcdClientName}} + image: gcr.io/etcd-development/etcd:v3.4.10 + command: + - sh + - -c + - "exec tail -f /dev/null"` ) func TestScaler(t *testing.T) { @@ -114,51 +112,45 @@ func TestScaler(t *testing.T) { t.Log("--- setting up ---") // Create kubernetes resources kc := GetKubernetesClient(t) - + data, templates := getTemplateData() + t.Cleanup(func() { + KubectlDeleteWithTemplate(t, data, "etcdClientTemplate", etcdClientTemplate) + RemoveCluster(t, kc) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) CreateNamespace(t, kc, testNamespace) // Create Etcd Cluster - etcd.InstallCluster(t, kc, testName, testNamespace) + KubectlApplyWithTemplate(t, data, "etcdClientTemplate", etcdClientTemplate) + InstallCluster(t, kc) + setVarValue(t, 0) // Create kubernetes resources for testing - data, templates := getTemplateData() KubectlApplyMultipleWithTemplate(t, data, templates) - testActivation(t, kc, data) - testScaleOut(t, kc, data) - testScaleIn(t, kc, data) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) + testActivation(t, kc) + testScaleOut(t, kc) + testScaleIn(t, kc) } -func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { +func testActivation(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing activation ---") - data.Value = 4 - KubectlApplyWithTemplate(t, data, jobName, setJobTemplate) - assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed") - KubectlReplaceWithTemplate(t, data, jobName, setJobTemplate) + setVarValue(t, 4) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { +func testScaleOut(t *testing.T, kc *kubernetes.Clientset) { 
t.Log("--- testing scale out ---") - data.Value = 9 - KubectlReplaceWithTemplate(t, data, jobName, setJobTemplate) - assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed") - KubectlReplaceWithTemplate(t, data, jobName, setJobTemplate) + setVarValue(t, 9) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), "replica count should be %d after 3 minutes", maxReplicaCount) } -func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { +func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale in ---") - data.Value = 0 - KubectlReplaceWithTemplate(t, data, jobName, setJobTemplate) - assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed") - KubectlReplaceWithTemplate(t, data, jobName, setJobTemplate) + setVarValue(t, 0) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -171,6 +163,8 @@ func getTemplateData() (templateData, []Template) { ScaledObjectName: scaledObjectName, JobName: jobName, EtcdName: testName, + EtcdClientName: etcdClientName, + EtcdEndpoints: etcdEndpoints, MinReplicaCount: minReplicaCount, MaxReplicaCount: maxReplicaCount, }, []Template{ @@ -178,3 +172,20 @@ func getTemplateData() (templateData, []Template) { {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, } } + +func setVarValue(t *testing.T, value int) { + _, _, err := ExecCommandOnSpecificPod(t, etcdClientName, testNamespace, fmt.Sprintf(`etcdctl put var %d --endpoints=%s`, value, etcdEndpoints)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) +} + +func InstallCluster(t *testing.T, kc *kubernetes.Clientset) { + _, err := ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set persistence.enabled=false --set resourcesPreset=none --set auth.rbac.create=false --set replicaCount=3 --namespace %s --wait etcd oci://registry-1.docker.io/bitnamicharts/etcd`, + testNamespace)) + require.NoErrorf(t, err, "cannot execute command - %s", err) +} + +func RemoveCluster(t *testing.T, kc *kubernetes.Clientset) { + _, err := ExecuteCommand(fmt.Sprintf(`helm delete --namespace %s --wait etcd`, + testNamespace)) + require.NoErrorf(t, err, "cannot execute command - %s", err) +} diff --git a/tests/scalers/etcd/helper/helper.go b/tests/scalers/etcd/helper/helper.go deleted file mode 100644 index 0e4842d3d51..00000000000 --- a/tests/scalers/etcd/helper/helper.go +++ /dev/null @@ -1,138 +0,0 @@ -//go:build e2e -// +build e2e - -package helper - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "k8s.io/client-go/kubernetes" - - "github.com/kedacore/keda/v2/tests/helper" -) - -type templateData struct { - Namespace string - EtcdName string -} - -const ( - statefulSetTemplate = `apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - app: {{.EtcdName}} - name: {{.EtcdName}} - namespace: {{.Namespace}} -spec: - replicas: 3 - selector: - matchLabels: - app: {{.EtcdName}} - serviceName: etcd-headless - template: - metadata: - labels: - app: {{.EtcdName}} - name: {{.EtcdName}} - spec: - containers: - - env: - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: gcr.io/etcd-development/etcd:v3.4.20 - command: - - sh - - -c - - "/usr/local/bin/etcd --name $MY_POD_NAME \ - --data-dir 
/etcd-data \ - --listen-client-urls http://$MY_POD_IP:2379 \ - --advertise-client-urls http://$MY_POD_IP:2379 \ - --listen-peer-urls http://$MY_POD_IP:2380 \ - --initial-advertise-peer-urls http://$MY_POD_IP:2380 \ - --initial-cluster {{.EtcdName}}-0=http://{{.EtcdName}}-0.etcd-headless.{{.Namespace}}:2380,{{.EtcdName}}-1=http://{{.EtcdName}}-1.etcd-headless.{{.Namespace}}:2380,{{.EtcdName}}-2=http://{{.EtcdName}}-2.etcd-headless.{{.Namespace}}:2380 \ - --initial-cluster-token tkn \ - --initial-cluster-state new \ - --experimental-watch-progress-notify-interval 10s \ - --log-level info \ - --logger zap \ - --log-outputs stderr" - imagePullPolicy: IfNotPresent - name: etcd - ports: - - containerPort: 2380 - name: peer - protocol: TCP - - containerPort: 2379 - name: client - protocol: TCP - volumeMounts: - - mountPath: /etcd-data - name: cache-volume - volumes: - - name: cache-volume - emptyDir: {} -` - headlessServiceTemplate = `apiVersion: v1 -kind: Service -metadata: - labels: - app: {{.EtcdName}} - name: etcd-headless - namespace: {{.Namespace}} -spec: - clusterIP: None - ports: - - name: infra-etcd-cluster-2379 - port: 2379 - protocol: TCP - targetPort: 2379 - - name: infra-etcd-cluster-2380 - port: 2380 - protocol: TCP - targetPort: 2380 - selector: - app: {{.EtcdName}} - type: ClusterIP -` - serviceTemplate = `apiVersion: v1 -kind: Service -metadata: - labels: - app: {{.EtcdName}} - name: etcd-svc - namespace: {{.Namespace}} -spec: - ports: - - name: etcd-cluster - port: 2379 - targetPort: 2379 - selector: - app: {{.EtcdName}} - sessionAffinity: None - type: NodePort -` -) - -var etcdClusterTemplates = []helper.Template{ - {Name: "statefulSetTemplate", Config: statefulSetTemplate}, - {Name: "headlessServiceTemplate", Config: headlessServiceTemplate}, - {Name: "serviceTemplate", Config: serviceTemplate}, -} - -func InstallCluster(t *testing.T, kc *kubernetes.Clientset, name, namespace string) { - var data = templateData{ - Namespace: namespace, - EtcdName: name, - } - helper.KubectlApplyMultipleWithTemplate(t, data, etcdClusterTemplates) - assert.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, name, namespace, 3, 60, 5), - "etcd-cluster should be up") -} diff --git a/tests/scalers/influxdb/influxdb_test.go b/tests/scalers/influxdb/influxdb_test.go index 133c0270304..639ae68a001 100644 --- a/tests/scalers/influxdb/influxdb_test.go +++ b/tests/scalers/influxdb/influxdb_test.go @@ -11,6 +11,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -189,6 +190,9 @@ func TestInfluxScaler(t *testing.T) { // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -204,17 +208,16 @@ func TestInfluxScaler(t *testing.T) { testScaleFloat(t, kc, data) // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func updateDataWithInfluxAuth(t *testing.T, kc *kubernetes.Clientset, data *templateData) { // run writeJob KubectlReplaceWithTemplate(t, data, "influxdbWriteJobTemplate", influxdbWriteJobTemplate) - assert.True(t, WaitForJobSuccess(t, kc, influxdbJobName, testNamespace, 30, 2), "Job should run successfully") + require.True(t, WaitForJobSuccess(t, kc, influxdbJobName, testNamespace, 30, 2), "Job should run successfully") // get pod logs log, err := FindPodLogs(kc, testNamespace, label, false) - assert.NoErrorf(t, err, "cannotget logs - %s", err) + require.NoErrorf(t, err, "cannotget logs - %s", err) var lines []string sc := bufio.NewScanner(strings.NewReader(log[0])) diff --git a/tests/scalers/kafka/kafka_test.go b/tests/scalers/kafka/kafka_test.go index 52720a3589a..550b3fb8138 100644 --- a/tests/scalers/kafka/kafka_test.go +++ b/tests/scalers/kafka/kafka_test.go @@ -10,6 +10,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . "github.com/kedacore/keda/v2/tests/helper" @@ -390,6 +391,9 @@ func TestScaler(t *testing.T) { kc := GetKubernetesClient(t) data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) addCluster(t, data) addTopic(t, data, topic1, topicPartitions) addTopic(t, data, topic2, topicPartitions) @@ -406,7 +410,6 @@ func TestScaler(t *testing.T) { testOneOnInvalidOffset(t, kc, data) testPersistentLag(t, kc, data) testScalingOnlyPartitionsWithLag(t, kc, data) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -665,7 +668,7 @@ func addTopic(t *testing.T, data templateData, name string, partitions int) { data.KafkaTopicPartitions = partitions KubectlApplyWithTemplate(t, data, "kafkaTopicTemplate", kafkaTopicTemplate) _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=480s --namespace %s", name, testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) t.Log("--- kafka topic added ---") } @@ -673,7 +676,7 @@ func addCluster(t *testing.T, data templateData) { t.Log("--- adding kafka cluster ---") KubectlApplyWithTemplate(t, data, "kafkaClusterTemplate", kafkaClusterTemplate) _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=480s --namespace %s", kafkaName, testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) t.Log("--- kafka cluster added ---") } diff --git a/tests/scalers/loki/loki_test.go b/tests/scalers/loki/loki_test.go index 4d32072f724..278119ca913 100644 --- a/tests/scalers/loki/loki_test.go +++ b/tests/scalers/loki/loki_test.go @@ -9,6 +9,7 @@ 
import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . "github.com/kedacore/keda/v2/tests/helper" @@ -145,12 +146,16 @@ spec: // is directly tied to the KEDA HPA while the other is isolated that can be used for metrics // even when the KEDA deployment is at zero - the service points to both deployments func TestLokiScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + uninstallLoki(t, testNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + installLoki(t, kc, testNamespace) // Create kubernetes resources for testing - data, templates := getTemplateData() KubectlApplyMultipleWithTemplate(t, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -158,10 +163,6 @@ func TestLokiScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - KubectlDeleteMultipleWithTemplate(t, data, templates) - uninstallLoki(t, testNamespace) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -203,16 +204,14 @@ func getTemplateData() (templateData, []Template) { func installLoki(t *testing.T, kc *kubernetes.Clientset, namespace string) { CreateNamespace(t, kc, namespace) _, err := ExecuteCommand("helm repo add grafana https://grafana.github.io/helm-charts") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand(fmt.Sprintf("helm upgrade --install loki grafana/loki-stack --wait --namespace=%s", namespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func uninstallLoki(t *testing.T, namespace string) { _, err := ExecuteCommand(fmt.Sprintf("helm uninstall loki --wait --namespace=%s", namespace)) assert.NoErrorf(t, err, "cannot execute command - %s", err) - DeleteNamespace(t, namespace) - WaitForNamespaceDeletion(t, namespace) } diff --git a/tests/scalers/mongodb/mongodb_test.go b/tests/scalers/mongodb/mongodb_test.go index ebfe22925f2..679569a4d58 100644 --- a/tests/scalers/mongodb/mongodb_test.go +++ b/tests/scalers/mongodb/mongodb_test.go @@ -12,6 +12,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -114,7 +115,10 @@ func TestScaler(t *testing.T) { // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() - + t.Cleanup(func() { + cleanupMongo(t) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) mongoPod := setupMongo(t, kc) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -124,10 +128,6 @@ func TestScaler(t *testing.T) { // test scaling testActivation(t, kc, mongoPod) testScaleOut(t, kc, mongoPod) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) - cleanupMongo(t) } func getTemplateData() (templateData, []Template) { @@ -155,14 +155,14 @@ 
func getTemplateData() (templateData, []Template) {
 func setupMongo(t *testing.T, kc *kubernetes.Clientset) string {
 	CreateNamespace(t, kc, mongoNamespace)
 	_, err := ExecuteCommand("helm repo add bitnami https://charts.bitnami.com/bitnami")
-	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	require.NoErrorf(t, err, "cannot execute command - %s", err)
 	_, err = ExecuteCommand("helm repo update")
-	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	require.NoErrorf(t, err, "cannot execute command - %s", err)
 	_, err = ExecuteCommand(fmt.Sprintf("helm install mongodb --set architecture=standalone --set auth.enabled=false --set persistence.enabled=false --namespace %s bitnami/mongodb --wait", mongoNamespace))
-	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	require.NoErrorf(t, err, "cannot execute command - %s", err)
 
 	podList, err := kc.CoreV1().Pods(mongoNamespace).List(context.Background(), metav1.ListOptions{})
-	assert.NoErrorf(t, err, "cannot get mongo pod - %s", err)
+	require.NoErrorf(t, err, "cannot get mongo pod - %s", err)
 
 	if len(podList.Items) != 1 {
 		t.Error("cannot get mongo pod name")
@@ -174,11 +174,11 @@ func setupMongo(t *testing.T, kc *kubernetes.Clientset) string {
 	createUserCmd := fmt.Sprintf("db.createUser({ user:\"%s\",pwd:\"%s\",roles:[{ role:\"readWrite\", db: \"%s\"}]})", mongoUser, mongoPassword, mongoDBName)
 	_, err = ExecuteCommand(fmt.Sprintf("kubectl exec %s -n %s -- mongosh --eval '%s'", mongoPod, mongoNamespace, createUserCmd))
-	assert.NoErrorf(t, err, "cannot create user - %s", err)
+	require.NoErrorf(t, err, "cannot create user - %s", err)
 
 	loginCmd := fmt.Sprintf("db.auth(\"%s\",\"%s\")", mongoUser, mongoPassword)
 	_, err = ExecuteCommand(fmt.Sprintf("kubectl exec %s -n %s -- mongosh --eval '%s'", mongoPod, mongoNamespace, loginCmd))
-	assert.NoErrorf(t, err, "cannot login - %s", err)
+	require.NoErrorf(t, err, "cannot login - %s", err)
 
 	return mongoPod
 }
diff --git a/tests/scalers/mssql/mssql_test.go b/tests/scalers/mssql/mssql_test.go
index 7df7d437369..cb4c3f41495 100644
--- a/tests/scalers/mssql/mssql_test.go
+++ b/tests/scalers/mssql/mssql_test.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
 	. "github.com/kedacore/keda/v2/tests/helper"
@@ -249,24 +250,28 @@ spec:
 func TestMssqlScaler(t *testing.T) {
 	// Create kubernetes resources for MS SQL server
 	kc := GetKubernetesClient(t)
-	data, mssqlTemplates := getMssqlTemplateData()
+	data, mssqlTemplates := getMssqlTemplateData()
+	_, templates := getTemplateData()
+	t.Cleanup(func() {
+		DeleteKubernetesResources(t, testNamespace, data, templates)
+	})
+
 	CreateKubernetesResources(t, kc, testNamespace, data, mssqlTemplates)
 
-	assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, mssqlServerName, testNamespace, 1, 60, 3),
+	require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, mssqlServerName, testNamespace, 1, 60, 3),
 		"replica count should be %d after 3 minutes", 1)
 
 	createDatabaseCommand := fmt.Sprintf("/opt/mssql-tools/bin/sqlcmd -S .
-U sa -P \"%s\" -Q \"CREATE DATABASE [%s]\"", mssqlPassword, mssqlDatabase) ok, out, errOut, err := WaitForSuccessfulExecCommandOnSpecificPod(t, mssqlServerPodName, testNamespace, createDatabaseCommand, 60, 3) - assert.True(t, ok, "executing a command on MS SQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err) + require.True(t, ok, "executing a command on MS SQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err) createTableCommand := fmt.Sprintf("/opt/mssql-tools/bin/sqlcmd -S . -U sa -P \"%s\" -d %s -Q \"CREATE TABLE tasks ([id] int identity primary key, [status] varchar(10))\"", mssqlPassword, mssqlDatabase) ok, out, errOut, err = WaitForSuccessfulExecCommandOnSpecificPod(t, mssqlServerPodName, testNamespace, createTableCommand, 60, 3) - assert.True(t, ok, "executing a command on MS SQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err) + require.True(t, ok, "executing a command on MS SQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err) // Create kubernetes resources for testing - data, templates := getTemplateData() KubectlApplyMultipleWithTemplate(t, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -274,9 +279,6 @@ func TestMssqlScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } // insert 10 records in the table -> activation should not happen (activationTargetValue = 15) diff --git a/tests/scalers/mysql/mysql_test.go b/tests/scalers/mysql/mysql_test.go index 6e3ac1afec5..c192b551523 100644 --- a/tests/scalers/mysql/mysql_test.go +++ b/tests/scalers/mysql/mysql_test.go @@ -11,6 +11,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -231,6 +232,9 @@ func TestMySQLScaler(t *testing.T) { kc := GetKubernetesClient(t) CreateNamespace(t, kc, testNamespace) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // setup MySQL setupMySQL(t, kc, data, templates) @@ -239,16 +243,13 @@ func TestMySQLScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func setupMySQL(t *testing.T, kc *kubernetes.Clientset, data templateData, templates []Template) { // Deploy mysql KubectlApplyWithTemplate(t, data, "mysqlDeploymentTemplate", mysqlDeploymentTemplate) KubectlApplyWithTemplate(t, data, "mysqlServiceTemplate", mysqlServiceTemplate) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "mysql", testNamespace, 1, 30, 2), "mysql is not in a ready state") + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "mysql", testNamespace, 1, 30, 2), "mysql is not in a ready state") // Wait 30 sec which would be enought for mysql to be accessible time.Sleep(30 * time.Second) @@ -257,14 +258,14 @@ func setupMySQL(t *testing.T, kc *kubernetes.Clientset, data templateData, templ out, err := ExecuteCommand(fmt.Sprintf("kubectl get pods -n %s -o jsonpath='{.items[0].metadata.name}'", testNamespace)) mysqlPod := string(out) if assert.NoErrorf(t, err, "cannot execute command - %s", err) { - assert.NotEmpty(t, mysqlPod) + require.NotEmpty(t, mysqlPod) } _, err = ExecuteCommand(fmt.Sprintf("kubectl exec -n %s %s -- mysql -u%s -p%s -e '%s'", testNamespace, mysqlPod, mySQLUsername, mySQLPassword, createTableSQL)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) // Deploy mysql consumer app, scaled object and trigger auth, etc. KubectlApplyMultipleWithTemplate(t, data, templates) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), "replica count should start out as 0") } diff --git a/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go b/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go index b7bd1755f90..a1cc087872c 100644 --- a/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go +++ b/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go @@ -9,6 +9,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" k8s "k8s.io/client-go/kubernetes" . "github.com/kedacore/keda/v2/tests/helper" @@ -47,14 +48,24 @@ func TestNATSJetStreamScalerClusterWithStreamReplicasWithNoAdvertise(t *testing. func testNATSJetStreamScalerClusterWithStreamReplicas(t *testing.T, noAdvertise bool) { // Create k8s resources. 
kc := GetKubernetesClient(t) + testData, testTemplates := nats.GetJetStreamDeploymentTemplateData(testNamespace, natsAddress, natsServerMonitoringEndpoint, messagePublishCount) + t.Cleanup(func() { + removeStreamAndConsumer(t, 1, testData.NatsStream, testNamespace, natsAddress) + DeleteKubernetesResources(t, testNamespace, testData, testTemplates) + + removeClusterWithJetStream(t) + DeleteNamespace(t, natsNamespace) + deleted := WaitForNamespaceDeletion(t, natsNamespace) + assert.Truef(t, deleted, "%s namespace not deleted", natsNamespace) + }) // Deploy NATS server. installClusterWithJetStream(t, kc, noAdvertise) - assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, nats.NatsJetStreamName, natsNamespace, natsServerReplicas, 60, 3), + require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, nats.NatsJetStreamName, natsNamespace, natsServerReplicas, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) // Create k8s resources for testing. - testData, testTemplates := nats.GetJetStreamDeploymentTemplateData(testNamespace, natsAddress, natsServerMonitoringEndpoint, messagePublishCount) + CreateKubernetesResources(t, kc, testNamespace, testData, testTemplates) // Create 3 replica stream with consumer @@ -81,29 +92,27 @@ func testNATSJetStreamScalerClusterWithStreamReplicas(t *testing.T, noAdvertise testScaleOut(t, kc, testData) testScaleIn(t, kc) - - // Cleanup test namespace - removeStreamAndConsumer(t, 1, testData.NatsStream, testNamespace, natsAddress) - DeleteKubernetesResources(t, testNamespace, testData, testTemplates) - - // Cleanup nats namespace - removeClusterWithJetStream(t) - DeleteNamespace(t, natsNamespace) - deleted := WaitForNamespaceDeletion(t, natsNamespace) - assert.Truef(t, deleted, "%s namespace not deleted", natsNamespace) } func TestNATSv2_10JetStreamScalerClusterWithStreamReplicas(t *testing.T) { // Create k8s resources. kc := GetKubernetesClient(t) - + testData, testTemplates := nats.GetJetStreamDeploymentTemplateData(testNamespace, natsAddress, natsServerHeadlessMonitoringEndpoint, messagePublishCount) + t.Cleanup(func() { + removeStreamAndConsumer(t, 1, testData.NatsStream, testNamespace, natsAddress) + DeleteKubernetesResources(t, testNamespace, testData, testTemplates) + + removeClusterWithJetStream(t) + DeleteNamespace(t, natsNamespace) + deleted := WaitForNamespaceDeletion(t, natsNamespace) + assert.Truef(t, deleted, "%s namespace not deleted", natsNamespace) + }) // Deploy NATS server. installClusterWithJetStreaV2_10(t, kc) - assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, nats.NatsJetStreamName, natsNamespace, natsServerReplicas, 60, 3), + require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, nats.NatsJetStreamName, natsNamespace, natsServerReplicas, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) // Create k8s resources for testing. 
- testData, testTemplates := nats.GetJetStreamDeploymentTemplateData(testNamespace, natsAddress, natsServerHeadlessMonitoringEndpoint, messagePublishCount) CreateKubernetesResources(t, kc, testNamespace, testData, testTemplates) // Create 3 replica stream with consumer @@ -132,16 +141,6 @@ func TestNATSv2_10JetStreamScalerClusterWithStreamReplicas(t *testing.T) { testActivation(t, kc, testData) testScaleOut(t, kc, testData) testScaleIn(t, kc) - - // Cleanup test namespace - removeStreamAndConsumer(t, 1, testData.NatsStream, testNamespace, natsAddress) - DeleteKubernetesResources(t, testNamespace, testData, testTemplates) - - // Cleanup nats namespace - removeClusterWithJetStream(t) - DeleteNamespace(t, natsNamespace) - deleted := WaitForNamespaceDeletion(t, natsNamespace) - assert.Truef(t, deleted, "%s namespace not deleted", natsNamespace) } // installStreamAndConsumer creates stream and consumer job. @@ -174,9 +173,9 @@ func removeStreamAndConsumer(t *testing.T, streamReplicas int, stream, namespace func installClusterWithJetStream(t *testing.T, kc *k8s.Clientset, noAdvertise bool) { CreateNamespace(t, kc, natsNamespace) _, err := ExecuteCommand(fmt.Sprintf("helm repo add %s %s", nats.NatsJetStreamName, natsHelmRepo)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --version %s --set %s --set %s --set %s --set %s --set %s --wait --namespace %s %s nats/nats`, nats.NatsJetStreamChartVersion, "nats.jetstream.enabled=true", @@ -186,16 +185,16 @@ func installClusterWithJetStream(t *testing.T, kc *k8s.Clientset, noAdvertise bo fmt.Sprintf("cluster.noAdvertise=%t", noAdvertise), natsNamespace, nats.NatsJetStreamName)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } // installClusterWithJetStreaV2_10 install the nats helm chart with clustered jetstream enabled using v2.10 func installClusterWithJetStreaV2_10(t *testing.T, kc *k8s.Clientset) { CreateNamespace(t, kc, natsNamespace) _, err := ExecuteCommand(fmt.Sprintf("helm repo add %s %s", nats.NatsJetStreamName, natsHelmRepo)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --version %s --set %s --set %s --set %s --set %s --set %s --set %s --set %s --wait --namespace %s %s nats/nats`, nats.Natsv2_10JetStreamChartVersion, "config.jetstream.enabled=true", @@ -207,7 +206,7 @@ func installClusterWithJetStreaV2_10(t *testing.T, kc *k8s.Clientset) { fmt.Sprintf("config.cluster.replicas=%d", natsServerReplicas), natsNamespace, nats.NatsJetStreamName)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } // removeClusterWithJetStream uninstall the nats helm chart diff --git a/tests/scalers/nats_jetstream/nats_jetstream_standalone/nats_jetstream_standalone_test.go b/tests/scalers/nats_jetstream/nats_jetstream_standalone/nats_jetstream_standalone_test.go index 23ca44f1b9b..836895fff8c 100644 --- 
a/tests/scalers/nats_jetstream/nats_jetstream_standalone/nats_jetstream_standalone_test.go +++ b/tests/scalers/nats_jetstream/nats_jetstream_standalone/nats_jetstream_standalone_test.go @@ -9,6 +9,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" k8s "k8s.io/client-go/kubernetes" . "github.com/kedacore/keda/v2/tests/helper" @@ -183,34 +184,35 @@ spec: func TestNATSJetStreamScaler(t *testing.T) { // Create k8s resources. kc := GetKubernetesClient(t) + data, templates := nats.GetJetStreamDeploymentTemplateData(testNamespace, natsAddress, natsServerMonitoringEndpoint, messagePublishCount) + t.Cleanup(func() { + removeServerWithJetStream(t, natsNamespace) + + DeleteNamespace(t, natsNamespace) + deleted := WaitForNamespaceDeletion(t, natsNamespace) + assert.Truef(t, deleted, "%s namespace not deleted", natsNamespace) + // Cleanup test namespace + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Deploy NATS server. installServerWithJetStream(t, kc, natsNamespace) - assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, nats.NatsJetStreamName, natsNamespace, 1, 60, 3), + require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, nats.NatsJetStreamName, natsNamespace, 1, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) // Create k8s resources for testing. - data, templates := nats.GetJetStreamDeploymentTemplateData(testNamespace, natsAddress, natsServerMonitoringEndpoint, messagePublishCount) CreateKubernetesResources(t, kc, testNamespace, data, templates) // Create stream and consumer. data.NatsStream = "standalone" KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", nats.ScaledObjectTemplate) installStreamAndConsumer(t, data.NatsStream, testNamespace, natsAddress) - assert.True(t, WaitForJobSuccess(t, kc, "stream", testNamespace, 60, 3), + require.True(t, WaitForJobSuccess(t, kc, "stream", testNamespace, 60, 3), "stream and consumer creation job should be success") testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // Cleanup nats namespace - removeServerWithJetStream(t, natsNamespace) - DeleteNamespace(t, natsNamespace) - deleted := WaitForNamespaceDeletion(t, natsNamespace) - assert.Truef(t, deleted, "%s namespace not deleted", natsNamespace) - // Cleanup test namespace - DeleteKubernetesResources(t, testNamespace, data, templates) } // installStreamAndConsumer creates stream and consumer. 
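The remaining files below follow the same shape: teardown is registered with t.Cleanup before any resources are created, and setup checks are promoted from assert to require so a failed prerequisite aborts the test instead of letting later steps run against a half-built environment. A minimal sketch of that pattern, reusing the shared e2e helpers that appear throughout this diff (GetKubernetesClient, CreateKubernetesResources, DeleteKubernetesResources, WaitForDeploymentReplicaReadyCount); the test name and the testScaleOut/testScaleIn sub-steps are placeholders, not code from the patch:

func TestExampleScaler(t *testing.T) {
	kc := GetKubernetesClient(t)
	data, templates := getTemplateData()

	// Registered first, so cleanup runs even if a later require.* aborts the test.
	t.Cleanup(func() {
		DeleteKubernetesResources(t, testNamespace, data, templates)
	})

	CreateKubernetesResources(t, kc, testNamespace, data, templates)

	// require (not assert) stops the test immediately when setup fails.
	require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
		"replica count should be %d after 1 minute", minReplicaCount)

	testScaleOut(t, kc, data)
	testScaleIn(t, kc)
}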
diff --git a/tests/scalers/newrelic/newrelic_test.go b/tests/scalers/newrelic/newrelic_test.go index 2a16e2a4c61..335730aba7a 100644 --- a/tests/scalers/newrelic/newrelic_test.go +++ b/tests/scalers/newrelic/newrelic_test.go @@ -214,9 +214,13 @@ func TestNewRelicScaler(t *testing.T) { newRelicRegion = "EU" } - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources CreateKubernetesResources(t, kc, testNamespace, data, templates) installNewRelic(t) @@ -228,9 +232,6 @@ func TestNewRelicScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc, data) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -258,9 +259,9 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { func installNewRelic(t *testing.T) { _, err := ExecuteCommand(fmt.Sprintf("helm repo add new-relic %s", newRelicHelmRepoURL)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) cmd := fmt.Sprintf(`helm upgrade --install --set global.cluster=%s --set prometheus.enabled=true --set ksm.enabled=true --set global.lowDataMode=true --set global.licenseKey=%s --timeout 600s --set logging.enabled=false --set ksm.enabled=true --set logging.enabled=true --namespace %s ri-keda new-relic/nri-bundle`, kuberneteClusterName, @@ -268,7 +269,7 @@ func installNewRelic(t *testing.T) { testNamespace) _, err = ExecuteCommand(cmd) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/openstack_swift/helper.go b/tests/scalers/openstack_swift/helper.go index 5196aeae1fc..9acf322b611 100644 --- a/tests/scalers/openstack_swift/helper.go +++ b/tests/scalers/openstack_swift/helper.go @@ -12,6 +12,7 @@ import ( containers "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" objects "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func CreateClient(t *testing.T, authURL, userID, password, projectID string) *gophercloud.ServiceClient { @@ -24,9 +25,9 @@ func CreateClient(t *testing.T, authURL, userID, password, projectID string) *go }, } provider, err := openstack.AuthenticatedClient(opts) - assert.NoErrorf(t, err, "cannot create the provider - %s", err) + require.NoErrorf(t, err, "cannot create the provider - %s", err) client, err := openstack.NewObjectStorageV1(provider, gophercloud.EndpointOpts{}) - assert.NoErrorf(t, err, "cannot create the client - %s", err) + require.NoErrorf(t, err, "cannot create the client - %s", err) return client } @@ -35,7 +36,7 @@ func CreateContainer(t *testing.T, client *gophercloud.ServiceClient, name strin ContentType: "application/json", } _, err := containers.Create(client, name, createOpts).Extract() - assert.NoErrorf(t, err, "cannot create the container - %s", err) + require.NoErrorf(t, err, "cannot create the container - %s", err) } func 
DeleteContainer(t *testing.T, client *gophercloud.ServiceClient, name string) {
diff --git a/tests/scalers/openstack_swift/openstack_swift_test.go b/tests/scalers/openstack_swift/openstack_swift_test.go
index b38fbc27ff1..915f5159f99 100644
--- a/tests/scalers/openstack_swift/openstack_swift_test.go
+++ b/tests/scalers/openstack_swift/openstack_swift_test.go
@@ -161,20 +161,19 @@ func TestScaler(t *testing.T) {
 client := helper.CreateClient(t, authURL, userID, password, projectID)
 helper.CreateContainer(t, client, containerName)

- // Create kubernetes resources
 kc := GetKubernetesClient(t)
 data, templates := getTemplateData()
+ t.Cleanup(func() {
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+ helper.DeleteContainer(t, client, containerName)
+ })
+
+ // Create kubernetes resources
 CreateKubernetesResources(t, kc, testNamespace, data, templates)

 testActivation(t, kc, client)
 testScaleOut(t, kc, client)
 testScaleIn(t, kc, client)
-
- // cleanup
- DeleteKubernetesResources(t, testNamespace, data, templates)
-
- // delete openstack resources
- helper.DeleteContainer(t, client, containerName)
 }

 func testActivation(t *testing.T, kc *kubernetes.Clientset, client *gophercloud.ServiceClient) {
diff --git a/tests/scalers/postgresql/postgresql_high_available/postgresql_ha_test.go b/tests/scalers/postgresql/postgresql_high_available/postgresql_ha_test.go
index 45844312cdd..727a6d82449 100644
--- a/tests/scalers/postgresql/postgresql_high_available/postgresql_ha_test.go
+++ b/tests/scalers/postgresql/postgresql_high_available/postgresql_ha_test.go
@@ -11,6 +11,7 @@ import (
 "github.com/joho/godotenv"
 "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
 "k8s.io/client-go/kubernetes"

 . "github.com/kedacore/keda/v2/tests/helper"
@@ -115,29 +116,33 @@ data:
 )

 func TestPostreSQLScaler(t *testing.T) {
- // Create kubernetes resources for PostgreSQL server
 kc := GetKubernetesClient(t)
 data, postgreSQLtemplates := getPostgreSQLTemplateData()
+ _, templates := getTemplateData()
+ t.Cleanup(func() {
+ KubectlDeleteMultipleWithTemplate(t, data, templates)
+ DeleteKubernetesResources(t, testNamespace, data, postgreSQLtemplates)
+ })
+ // Create kubernetes resources for PostgreSQL server
 CreateKubernetesResources(t, kc, testNamespace, data, postgreSQLtemplates)

- assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLStatefulSetName, testNamespace, 1, 60, 3),
+ require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLStatefulSetName, testNamespace, 1, 60, 3),
 "replica count should be %d after 3 minutes", 1)
- assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLReplicaStatefulSetName, testNamespace, 1, 60, 3),
+ require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLReplicaStatefulSetName, testNamespace, 1, 60, 3),
 "replica count should be %d after 3 minutes", 1)

 createTableSQL := "CREATE TABLE task_instance (id serial PRIMARY KEY,state VARCHAR(10));"
 ok, out, errOut, err := WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlMasterPodName, testNamespace,
 fmt.Sprintf("psql -U %s -d %s -c \"%s\"", postgreSQLUsername, postgreSQLDatabase, createTableSQL), 60, 3)
- assert.True(t, ok, "executing a command on PostreSQL Master Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)
+ require.True(t, ok, "executing a command on PostreSQL Master Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)
checkTableExists := "SELECT * from task_instance;"
 ok, out, errOut, err = WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlReplicaPodName, testNamespace,
 fmt.Sprintf("psql -U %s -d %s -c \"%s\"", postgreSQLUsername, postgreSQLDatabase, checkTableExists), 60, 3)
- assert.True(t, ok, "executing a command on PostreSQL Replica Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)
+ require.True(t, ok, "executing a command on PostreSQL Replica Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)

 // Create kubernetes resources for testing
- data, templates := getTemplateData()
 KubectlApplyMultipleWithTemplate(t, data, templates)
 assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
 "replica count should be %d after 3 minutes", minReplicaCount)
@@ -145,10 +150,6 @@ func TestPostreSQLScaler(t *testing.T) {
 testActivation(t, kc, data)
 testScaleOut(t, kc, data)
 testScaleIn(t, kc)
-
- // cleanup
- KubectlDeleteMultipleWithTemplate(t, data, templates)
- DeleteKubernetesResources(t, testNamespace, data, postgreSQLtemplates)
 }

 func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
diff --git a/tests/scalers/postgresql/postgresql_standalone/postgresql_test.go b/tests/scalers/postgresql/postgresql_standalone/postgresql_test.go
index f907c12816c..42761ea9c97 100644
--- a/tests/scalers/postgresql/postgresql_standalone/postgresql_test.go
+++ b/tests/scalers/postgresql/postgresql_standalone/postgresql_test.go
@@ -10,6 +10,7 @@ import (
 "github.com/joho/godotenv"
 "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
 "k8s.io/client-go/kubernetes"

 . "github.com/kedacore/keda/v2/tests/helper"
@@ -70,21 +71,26 @@ data:
 )

 func TestPostreSQLScaler(t *testing.T) {
- // Create kubernetes resources for PostgreSQL server
 kc := GetKubernetesClient(t)
 data, postgreSQLtemplates := getPostgreSQLTemplateData()
+ _, templates := getTemplateData()
+ t.Cleanup(func() {
+ KubectlDeleteMultipleWithTemplate(t, data, templates)
+ DeleteKubernetesResources(t, testNamespace, data, postgreSQLtemplates)
+ })
+
+ // Create kubernetes resources for PostgreSQL server
 CreateKubernetesResources(t, kc, testNamespace, data, postgreSQLtemplates)

- assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLStatefulSetName, testNamespace, 1, 60, 3),
+ require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLStatefulSetName, testNamespace, 1, 60, 3),
 "replica count should be %d after 3 minutes", 1)

 createTableSQL := "CREATE TABLE task_instance (id serial PRIMARY KEY,state VARCHAR(10));"
 ok, out, errOut, err := WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlPodName, testNamespace,
 fmt.Sprintf("psql -U %s -d %s -c \"%s\"", postgreSQLUsername, postgreSQLDatabase, createTableSQL), 60, 3)
- assert.True(t, ok, "executing a command on PostreSQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)
+ require.True(t, ok, "executing a command on PostreSQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)

 // Create kubernetes resources for testing
- data, templates := getTemplateData()
 KubectlApplyMultipleWithTemplate(t, data, templates)
 assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
 "replica count should be %d after 3 minutes", minReplicaCount)
@@ -92,10 +98,6 @@ func
TestPostreSQLScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - KubectlDeleteMultipleWithTemplate(t, data, templates) - DeleteKubernetesResources(t, testNamespace, data, postgreSQLtemplates) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/predictkube/predictkube_test.go b/tests/scalers/predictkube/predictkube_test.go index 803ed240b75..f39e84fa594 100644 --- a/tests/scalers/predictkube/predictkube_test.go +++ b/tests/scalers/predictkube/predictkube_test.go @@ -210,12 +210,17 @@ spec: func TestScaler(t *testing.T) { require.NotEmpty(t, predictkubeAPIKey, "PREDICTKUBE_API_KEY env variable is required for predictkube test") - // Create kubernetes resources kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + prometheus.Uninstall(t, prometheusServerName, testNamespace, nil) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources prometheus.Install(t, kc, prometheusServerName, testNamespace, nil) // Create kubernetes resources for testing - data, templates := getTemplateData() KubectlApplyMultipleWithTemplate(t, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredAppName, testNamespace, 1, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -225,10 +230,6 @@ func TestScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - KubectlDeleteMultipleWithTemplate(t, data, templates) - prometheus.Uninstall(t, prometheusServerName, testNamespace, nil) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/prometheus/prometheus_helper.go b/tests/scalers/prometheus/prometheus_helper.go index 2251d744edf..acf35418bb7 100644 --- a/tests/scalers/prometheus/prometheus_helper.go +++ b/tests/scalers/prometheus/prometheus_helper.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" "github.com/kedacore/keda/v2/tests/helper" @@ -551,7 +551,7 @@ func Install(t *testing.T, kc *kubernetes.Clientset, name, namespace string, pki PrometheusServerName: name, } helper.KubectlApplyMultipleWithTemplate(t, data, getPrometheusTemplates(pki)) - assert.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, name, namespace, 1, 60, 3), + require.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, name, namespace, 1, 60, 3), "replica count should be 1 after 3 minutes") } @@ -561,5 +561,4 @@ func Uninstall(t *testing.T, name, namespace string, pki *VaultPkiData) { PrometheusServerName: name, } helper.KubectlDeleteMultipleWithTemplate(t, data, getPrometheusTemplates(pki)) - helper.DeleteNamespace(t, namespace) } diff --git a/tests/scalers/prometheus/prometheus_test.go b/tests/scalers/prometheus/prometheus_test.go index c379bee6fd9..d570665bed2 100644 --- a/tests/scalers/prometheus/prometheus_test.go +++ b/tests/scalers/prometheus/prometheus_test.go @@ -211,10 +211,14 @@ spec: func TestPrometheusScaler(t *testing.T) { // Create kubernetes resources kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + prometheus.Uninstall(t, prometheusServerName, testNamespace, nil) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) prometheus.Install(t, kc, prometheusServerName, testNamespace, nil) // 
Create kubernetes resources for testing - data, templates := getTemplateData() KubectlApplyMultipleWithTemplate(t, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredAppName, testNamespace, 1, 60, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -224,10 +228,6 @@ func TestPrometheusScaler(t *testing.T) { testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - KubectlDeleteMultipleWithTemplate(t, data, templates) - prometheus.Uninstall(t, prometheusServerName, testNamespace, nil) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/pulsar/helper/helper.go b/tests/scalers/pulsar/helper/helper.go index 00e06bf84b2..78090943940 100644 --- a/tests/scalers/pulsar/helper/helper.go +++ b/tests/scalers/pulsar/helper/helper.go @@ -9,6 +9,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" "github.com/kedacore/keda/v2/tests/helper" @@ -286,21 +287,28 @@ func TestScalerWithConfig(t *testing.T, testName string, numPartitions int) { // Create kubernetes resources kc := helper.GetKubernetesClient(t) data, templates := getTemplateData(testName, numPartitions) + t.Cleanup(func() { + helper.KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + helper.KubectlDeleteWithTemplate(t, data, "publishJobTemplate", topicPublishJobTemplate) + helper.KubectlDeleteWithTemplate(t, data, "topicInitJobTemplate", topicInitJobTemplate) + + helper.DeleteKubernetesResources(t, testName, data, templates) + }) helper.CreateKubernetesResources(t, kc, testName, data, templates) - assert.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, testName, testName, 1, 300, 1), + require.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, testName, testName, 1, 300, 1), "replica count should be 1 within 5 minutes") helper.KubectlReplaceWithTemplate(t, data, "topicInitJobTemplate", topicInitJobTemplate) - assert.True(t, helper.WaitForJobSuccess(t, kc, getTopicInitJobName(testName), testName, 300, 1), + require.True(t, helper.WaitForJobSuccess(t, kc, getTopicInitJobName(testName), testName, 300, 1), "job should succeed within 5 minutes") helper.KubectlApplyWithTemplate(t, data, "consumerTemplate", consumerTemplate) // run consumer for create subscription - assert.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, getConsumerDeploymentName(testName), testName, 1, 300, 1), + require.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, getConsumerDeploymentName(testName), testName, 1, 300, 1), "replica count should be 1 within 5 minutes") helper.KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) @@ -313,13 +321,6 @@ func TestScalerWithConfig(t *testing.T, testName string, numPartitions int) { testScaleOut(t, kc, data) // scale in testScaleIn(t, kc, testName) - - // cleanup - helper.KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) - helper.KubectlDeleteWithTemplate(t, data, "publishJobTemplate", topicPublishJobTemplate) - helper.KubectlDeleteWithTemplate(t, data, "topicInitJobTemplate", topicInitJobTemplate) - - helper.DeleteKubernetesResources(t, testName, data, templates) } func getTemplateData(testName string, numPartitions int) (templateData, []helper.Template) { diff --git a/tests/scalers/rabbitmq/rabbitmq_helper.go b/tests/scalers/rabbitmq/rabbitmq_helper.go 
index b44c4ce135b..6f22b178d95 100644
--- a/tests/scalers/rabbitmq/rabbitmq_helper.go
+++ b/tests/scalers/rabbitmq/rabbitmq_helper.go
@@ -7,6 +7,7 @@ import (
 "fmt"
 "testing"

+ "github.com/stretchr/testify/require"
 "k8s.io/client-go/kubernetes"

 "github.com/kedacore/keda/v2/tests/helper"
@@ -91,7 +92,7 @@ spec:
 namespace: {{.Namespace}}
 spec:
 containers:
- - image: rabbitmq:3-management
+ - image: rabbitmq:3.12-management
 name: rabbitmq
 volumeMounts:
 - mountPath: /etc/rabbitmq
@@ -167,6 +168,8 @@ spec:
 `
 )

+const RabbitServerName string = "rabbitmq"
+
 type RabbitOAuthConfig struct {
 Enable bool
 ClientID string
@@ -216,6 +219,8 @@ func RMQInstall(t *testing.T, kc *kubernetes.Clientset, namespace, user, passwor
 }

 helper.KubectlApplyWithTemplate(t, data, "rmqDeploymentTemplate", deploymentTemplate)
+ require.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, RabbitServerName, namespace, 1, 180, 1),
+ "replica count should be 1 after 3 minutes")
 }

 func RMQUninstall(t *testing.T, namespace, user, password, vhost string, oauth RabbitOAuthConfig) {
diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go
index be16d0a480b..3bc8efa179e 100644
--- a/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go
+++ b/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go
@@ -74,11 +74,14 @@ type templateData struct {
 func TestScaler(t *testing.T) {
 // setup
 t.Log("--- setting up ---")
-
- // Create kubernetes resources
 kc := GetKubernetesClient(t)
 data, templates := getTemplateData()
+ t.Cleanup(func() {
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+ RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth())
+ })

+ // Create kubernetes resources
 RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth())
 CreateKubernetesResources(t, kc, testNamespace, data, templates)
@@ -88,11 +91,6 @@ func TestScaler(t *testing.T) {
 "replica count should be 0 after 1 minute")
 testScaling(t, kc)
 testActivationValue(t, kc)
-
- // cleanup
- t.Log("--- cleaning up ---")
- DeleteKubernetesResources(t, testNamespace, data, templates)
- RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth())
 }

 func getTemplateData()
(templateData, []Template) { diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go index d19c5138956..ff1f930e7b0 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go @@ -75,11 +75,14 @@ type templateData struct { func TestScaler(t *testing.T) { // setup t.Log("--- setting up ---") - - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) + }) + // Create kubernetes resources RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth()) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -87,11 +90,6 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") testScaling(t, kc) - - // cleanup - t.Log("--- cleaning up ---") - DeleteKubernetesResources(t, testNamespace, data, templates) - RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go index 865ff77b1b2..bd49396717a 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go @@ -119,10 +119,14 @@ func TestScaler(t *testing.T) { require.NotEmpty(t, rabbitAppClientID, "TF_AZURE_RABBIT_API_APPLICATION_ID env variable is required for rabbitmq workload identity tests") require.NotEmpty(t, azureADTenantID, "TF_AZURE_SP_TENANT env variable is required for rabbitmq workload identity tests") - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + RMQUninstall(t, rmqNamespace, user, password, vhost, WithAzureADOAuth(azureADTenantID, rabbitAppClientID)) + }) + // Create kubernetes resources RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithAzureADOAuth(azureADTenantID, rabbitAppClientID)) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -130,11 +134,6 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") testScaling(t, kc) - - // cleanup - t.Log("--- cleaning up ---") - DeleteKubernetesResources(t, testNamespace, data, templates) - RMQUninstall(t, rmqNamespace, user, password, vhost, WithAzureADOAuth(azureADTenantID, rabbitAppClientID)) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_regex/rabbitmq_queue_http_regex_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_regex/rabbitmq_queue_http_regex_test.go index da128a85519..0ac8c1c6f66 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_regex/rabbitmq_queue_http_regex_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_regex/rabbitmq_queue_http_regex_test.go @@ -78,11 +78,14 @@ type templateData struct { func TestScaler(t *testing.T) { // setup t.Log("--- setting up ---") - - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, 
testNamespace, data, templates) + RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) + }) + // Create kubernetes resources RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth()) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -90,11 +93,6 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") testScaling(t, kc) - - // cleanup - t.Log("--- cleaning up ---") - DeleteKubernetesResources(t, testNamespace, data, templates) - RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_aad_wi/rabbitmq_queue_http_regex_aad_wi_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_aad_wi/rabbitmq_queue_http_regex_aad_wi_test.go index db40a384f24..1faf94c675e 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_aad_wi/rabbitmq_queue_http_regex_aad_wi_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_aad_wi/rabbitmq_queue_http_regex_aad_wi_test.go @@ -121,10 +121,14 @@ func TestScaler(t *testing.T) { require.NotEmpty(t, rabbitAppClientID, "TF_AZURE_RABBIT_API_APPLICATION_ID env variable is required for rabbitmq workload identity tests") require.NotEmpty(t, azureADTenantID, "TF_AZURE_SP_TENANT env variable is required for rabbitmq workload identity tests") - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + RMQUninstall(t, rmqNamespace, user, password, vhost, WithAzureADOAuth(azureADTenantID, rabbitAppClientID)) + }) + // Create kubernetes resources RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithAzureADOAuth(azureADTenantID, rabbitAppClientID)) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -132,11 +136,6 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") testScaling(t, kc) - - // cleanup - t.Log("--- cleaning up ---") - DeleteKubernetesResources(t, testNamespace, data, templates) - RMQUninstall(t, rmqNamespace, user, password, vhost, WithAzureADOAuth(azureADTenantID, rabbitAppClientID)) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_vhost/rabbitmq_queue_http_regex_vhost_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_vhost/rabbitmq_queue_http_regex_vhost_test.go index 113d92d3cff..d633a0e0e39 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_vhost/rabbitmq_queue_http_regex_vhost_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_regex_vhost/rabbitmq_queue_http_regex_vhost_test.go @@ -83,11 +83,14 @@ type templateData struct { func TestScaler(t *testing.T) { // setup t.Log("--- setting up ---") - - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) + }) + // Create kubernetes resources RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth()) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -98,11 +101,6 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") testScaling(t, kc) - - // cleanup - t.Log("--- cleaning up ---") - DeleteKubernetesResources(t, testNamespace, data, templates) - RMQUninstall(t, rmqNamespace, user, password, vhost, 
WithoutOAuth()) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go index a6c6963841b..9dbefd50480 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go @@ -75,11 +75,14 @@ type templateData struct { func TestScaler(t *testing.T) { // setup t.Log("--- setting up ---") - - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) + }) + // Create kubernetes resources RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth()) CreateKubernetesResources(t, kc, testNamespace, data, templates) @@ -87,11 +90,6 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") testScaling(t, kc) - - // cleanup - t.Log("--- cleaning up ---") - DeleteKubernetesResources(t, testNamespace, data, templates) - RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) } func getTemplateData() (templateData, []Template) { diff --git a/tests/scalers/redis/helper/helper.go b/tests/scalers/redis/helper/helper.go index 69df98a9783..a052f79e0f9 100644 --- a/tests/scalers/redis/helper/helper.go +++ b/tests/scalers/redis/helper/helper.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" "github.com/kedacore/keda/v2/tests/helper" @@ -88,14 +89,14 @@ func RemoveStandalone(t *testing.T, name, namespace string) { func InstallSentinel(t *testing.T, kc *kubernetes.Clientset, name, namespace, password string) { helper.CreateNamespace(t, kc, namespace) _, err := helper.ExecuteCommand("helm repo add bitnami https://charts.bitnami.com/bitnami") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = helper.ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = helper.ExecuteCommand(fmt.Sprintf(`helm install --wait --timeout 900s %s --namespace %s --set sentinel.enabled=true --set master.persistence.enabled=false --set replica.persistence.enabled=false --set global.redis.password=%s bitnami/redis`, name, namespace, password)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func RemoveSentinel(t *testing.T, name, namespace string) { @@ -109,14 +110,14 @@ func RemoveSentinel(t *testing.T, name, namespace string) { func InstallCluster(t *testing.T, kc *kubernetes.Clientset, name, namespace, password string) { helper.CreateNamespace(t, kc, namespace) _, err := helper.ExecuteCommand("helm repo add bitnami https://charts.bitnami.com/bitnami") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = helper.ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = helper.ExecuteCommand(fmt.Sprintf(`helm install --wait --timeout 900s 
%s --namespace %s --set persistence.enabled=false --set password=%s --timeout 10m0s bitnami/redis-cluster`, name, namespace, password)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func RemoveCluster(t *testing.T, name, namespace string) { diff --git a/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go b/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go index 9610134e1fd..df86948f35c 100644 --- a/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go +++ b/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go @@ -169,21 +169,21 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveCluster(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Cluster redis.InstallCluster(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveCluster(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/redis/redis_cluster_streams_lag/redis_cluster_streams_lag_test.go b/tests/scalers/redis/redis_cluster_streams_lag/redis_cluster_streams_lag_test.go index 49aa76a11d9..c4abb8be528 100644 --- a/tests/scalers/redis/redis_cluster_streams_lag/redis_cluster_streams_lag_test.go +++ b/tests/scalers/redis/redis_cluster_streams_lag/redis_cluster_streams_lag_test.go @@ -178,13 +178,15 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) - + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveCluster(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Cluster redis.InstallCluster(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() - CreateKubernetesResources(t, kc, testNamespace, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 3), @@ -201,10 +203,6 @@ func TestScaler(t *testing.T) { t.Log("--- testing scale in ---") testScaleIn(t, kc, minReplicaCount) - - // Clean up - DeleteKubernetesResources(t, testNamespace, data, templates) - redis.RemoveCluster(t, testName, redisNamespace) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData, numMessages int, maxReplicas int) { diff --git a/tests/scalers/redis/redis_cluster_streams_length/redis_cluster_streams_length_test.go b/tests/scalers/redis/redis_cluster_streams_length/redis_cluster_streams_length_test.go index e009c29822d..19658aa5c73 100644 --- a/tests/scalers/redis/redis_cluster_streams_length/redis_cluster_streams_length_test.go +++ b/tests/scalers/redis/redis_cluster_streams_length/redis_cluster_streams_length_test.go @@ -178,20 +178,20 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + 
redis.RemoveCluster(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Cluster redis.InstallCluster(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveCluster(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/redis/redis_cluster_streams_pending_entries/redis_cluster_streams_pending_entries_test.go b/tests/scalers/redis/redis_cluster_streams_pending_entries/redis_cluster_streams_pending_entries_test.go index 79a5e85cfac..80a6a663fad 100644 --- a/tests/scalers/redis/redis_cluster_streams_pending_entries/redis_cluster_streams_pending_entries_test.go +++ b/tests/scalers/redis/redis_cluster_streams_pending_entries/redis_cluster_streams_pending_entries_test.go @@ -177,20 +177,20 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveCluster(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Cluster redis.InstallCluster(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveCluster(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go b/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go index c2edb58ac6c..ee17e7704df 100644 --- a/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go +++ b/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go @@ -177,21 +177,21 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveSentinel(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Sentinel redis.InstallSentinel(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveSentinel(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/redis/redis_sentinel_streams_lag/redis_sentinel_streams_lag_test.go b/tests/scalers/redis/redis_sentinel_streams_lag/redis_sentinel_streams_lag_test.go index e269f15c8fc..fc6d8e2da2f 100644 --- a/tests/scalers/redis/redis_sentinel_streams_lag/redis_sentinel_streams_lag_test.go +++ b/tests/scalers/redis/redis_sentinel_streams_lag/redis_sentinel_streams_lag_test.go @@ -192,13 +192,16 @@ spec: func TestScaler(t *testing.T) 
{ // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveSentinel(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Sentinel redis.InstallSentinel(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() - CreateKubernetesResources(t, kc, testNamespace, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 3), @@ -215,10 +218,6 @@ func TestScaler(t *testing.T) { t.Log("--- testing scale in ---") testScaleIn(t, kc, minReplicaCount) - - // Clean up - DeleteKubernetesResources(t, testNamespace, data, templates) - redis.RemoveSentinel(t, testName, redisNamespace) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData, numMessages int, maxReplicas int) { diff --git a/tests/scalers/redis/redis_sentinel_streams_length/redis_sentinel_streams_length_test.go b/tests/scalers/redis/redis_sentinel_streams_length/redis_sentinel_streams_length_test.go index cca11608b36..b6fd46c8d86 100644 --- a/tests/scalers/redis/redis_sentinel_streams_length/redis_sentinel_streams_length_test.go +++ b/tests/scalers/redis/redis_sentinel_streams_length/redis_sentinel_streams_length_test.go @@ -192,20 +192,20 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveSentinel(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Sentinel redis.InstallSentinel(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveSentinel(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/redis/redis_sentinel_streams_pending_entries/redis_sentinel_streams_pending_entries_test.go b/tests/scalers/redis/redis_sentinel_streams_pending_entries/redis_sentinel_streams_pending_entries_test.go index 3fc4ab7d714..33f08933ce7 100644 --- a/tests/scalers/redis/redis_sentinel_streams_pending_entries/redis_sentinel_streams_pending_entries_test.go +++ b/tests/scalers/redis/redis_sentinel_streams_pending_entries/redis_sentinel_streams_pending_entries_test.go @@ -191,20 +191,20 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveSentinel(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Sentinel redis.InstallSentinel(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveSentinel(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git 
a/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go b/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go index 1f234992545..7c61ff4d2bf 100644 --- a/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go +++ b/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go @@ -165,21 +165,21 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveStandalone(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Standalone redis.InstallStandalone(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveStandalone(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/redis/redis_standalone_streams_lag/redis_standalone_streams_lag_test.go b/tests/scalers/redis/redis_standalone_streams_lag/redis_standalone_streams_lag_test.go index 13e808b28a5..4de773e5e61 100644 --- a/tests/scalers/redis/redis_standalone_streams_lag/redis_standalone_streams_lag_test.go +++ b/tests/scalers/redis/redis_standalone_streams_lag/redis_standalone_streams_lag_test.go @@ -175,12 +175,16 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveStandalone(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Standalone redis.InstallStandalone(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 3), @@ -197,10 +201,6 @@ func TestScaler(t *testing.T) { t.Log("--- testing scale in ---") testScaleIn(t, kc, minReplicaCount) - - // cleanup - redis.RemoveStandalone(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData, numMessages int, maxReplicas int) { diff --git a/tests/scalers/redis/redis_standalone_streams_length/redis_standalone_streams_length_test.go b/tests/scalers/redis/redis_standalone_streams_length/redis_standalone_streams_length_test.go index 9a50bd58c92..67896d8f382 100644 --- a/tests/scalers/redis/redis_standalone_streams_length/redis_standalone_streams_length_test.go +++ b/tests/scalers/redis/redis_standalone_streams_length/redis_standalone_streams_length_test.go @@ -175,20 +175,20 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveStandalone(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Standalone redis.InstallStandalone(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing 
- data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveStandalone(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/redis/redis_standalone_streams_pending_entries/redis_standalone_streams_test_pending_entries_test.go b/tests/scalers/redis/redis_standalone_streams_pending_entries/redis_standalone_streams_test_pending_entries_test.go index b52a056ea2f..79299e8440a 100644 --- a/tests/scalers/redis/redis_standalone_streams_pending_entries/redis_standalone_streams_test_pending_entries_test.go +++ b/tests/scalers/redis/redis_standalone_streams_pending_entries/redis_standalone_streams_test_pending_entries_test.go @@ -174,20 +174,20 @@ spec: func TestScaler(t *testing.T) { // Create kubernetes resources for PostgreSQL server kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + redis.RemoveStandalone(t, testName, redisNamespace) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) // Create Redis Standalone redis.InstallStandalone(t, kc, testName, redisNamespace, redisPassword) // Create kubernetes resources for testing - data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - redis.RemoveStandalone(t, testName, redisNamespace) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/selenium/selenium_test.go b/tests/scalers/selenium/selenium_test.go index 9931dd2e556..8d4b43cef19 100644 --- a/tests/scalers/selenium/selenium_test.go +++ b/tests/scalers/selenium/selenium_test.go @@ -10,6 +10,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -455,25 +456,26 @@ spec: ) func TestSeleniumScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources CreateKubernetesResources(t, kc, testNamespace, data, templates) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, hubDeploymentName, testNamespace, 1, 60, 1), + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, hubDeploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, chromeDeploymentName, testNamespace, minReplicaCount, 60, 1), + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, chromeDeploymentName, testNamespace, minReplicaCount, 60, 1), "replica count should be 0 after 1 minute") - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, firefoxDeploymentName, testNamespace, minReplicaCount, 60, 1), + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, firefoxDeploymentName, testNamespace, minReplicaCount, 60, 1), "replica count should be 0 after 1 minute") - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, edgeDeploymentName, testNamespace, minReplicaCount, 60, 1), + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, edgeDeploymentName, testNamespace, minReplicaCount, 60, 1), "replica count should be 0 after 1 minute") testActivation(t, kc, data) testScaleOut(t, kc, data) testScaleIn(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { diff --git a/tests/scalers/solace/solace_test.go b/tests/scalers/solace/solace_test.go index 000a62d38cf..13ead35fa2e 100644 --- a/tests/scalers/solace/solace_test.go +++ b/tests/scalers/solace/solace_test.go @@ -9,6 +9,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" @@ -204,9 +205,15 @@ spec: ) func TestStanScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplateRate", scaledObjectTemplateRate) + uninstallSolace(t) + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources CreateKubernetesResources(t, kc, testNamespace, data, templates) installSolace(t) KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) @@ -222,26 +229,21 @@ func TestStanScaler(t *testing.T) { testActivationRate(t, kc) testScaleOutRate(t, kc) testScaleInRate(t, kc) - - // cleanup - KubectlDeleteWithTemplate(t, data, "scaledObjectTemplateRate", scaledObjectTemplateRate) - uninstallSolace(t) - DeleteKubernetesResources(t, testNamespace, data, templates) } func installSolace(t *testing.T) { _, err := ExecuteCommand("helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-helm-quickstart/helm-charts") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set solace.usernameAdminPassword=KedaLabAdminPwd1 --set storage.persistent=false,solace.size=dev,nameOverride=pubsubplus-dev,service.type=ClusterIP --namespace %s kedalab solacecharts/pubsubplus`, testNamespace)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) _, err = ExecuteCommand("sleep 60") // there is a bug in the solace helm chart where it is looking for the wrong number of replicas on --wait - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) // Create the pubsub broker _, _, err = ExecCommandOnSpecificPod(t, helperName, testNamespace, "./config/config_solace.sh") - assert.NoErrorf(t, err, "cannot execute command - %s", err) + require.NoErrorf(t, err, "cannot execute command - %s", err) } func uninstallSolace(t *testing.T) { diff --git a/tests/scalers/solr/solr_test.go b/tests/scalers/solr/solr_test.go index 22736a9ec3a..d35418382d9 100644 --- a/tests/scalers/solr/solr_test.go +++ b/tests/scalers/solr/solr_test.go @@ -13,6 +13,7 @@ import ( "github.com/joho/godotenv" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" . 
"github.com/kedacore/keda/v2/tests/helper" // For helper methods @@ -177,9 +178,13 @@ spec: ) func TestSolrScaler(t *testing.T) { - // Create kubernetes resources kc := GetKubernetesClient(t) data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources CreateKubernetesResources(t, kc, testNamespace, data, templates) // setup solr @@ -192,23 +197,22 @@ func TestSolrScaler(t *testing.T) { testActivation(t, kc) testScaleOut(t, kc) testScaleIn(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) } func setupSolr(t *testing.T, kc *kubernetes.Clientset) { - assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "solr", testNamespace, 1, 60, 3), + require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "solr", testNamespace, 1, 60, 3), "solr should be up") err := checkIfSolrStatusIsReady(t, solrPodName) - assert.NoErrorf(t, err, "%s", err) + require.NoErrorf(t, err, "%s", err) // Create the collection - out, errOut, _ := ExecCommandOnSpecificPod(t, solrPodName, testNamespace, fmt.Sprintf("%s create_core -c %s", solrPath, solrCollection)) + out, errOut, err := ExecCommandOnSpecificPod(t, solrPodName, testNamespace, fmt.Sprintf("%s create_core -c %s", solrPath, solrCollection)) + require.NoErrorf(t, err, "%s", err) t.Logf("Output: %s, Error: %s", out, errOut) // Enable BasicAuth - out, errOut, _ = ExecCommandOnSpecificPod(t, solrPodName, testNamespace, "echo '{\"authentication\":{\"class\":\"solr.BasicAuthPlugin\",\"credentials\":{\"solr\":\"IV0EHq1OnNrj6gvRCwvFwTrZ1+z1oBbnQdiVC3otuq0= Ndd7LKvVBAaZIF0QAVi1ekCfAJXr1GGfLtRUXhgrF8c=\"}},\"authorization\":{\"class\":\"solr.RuleBasedAuthorizationPlugin\",\"permissions\":[{\"name\":\"security-edit\",\"role\":\"admin\"}],\"user-role\":{\"solr\":\"admin\"}}}' > /var/solr/data/security.json") + out, errOut, err = ExecCommandOnSpecificPod(t, solrPodName, testNamespace, "echo '{\"authentication\":{\"class\":\"solr.BasicAuthPlugin\",\"credentials\":{\"solr\":\"IV0EHq1OnNrj6gvRCwvFwTrZ1+z1oBbnQdiVC3otuq0= Ndd7LKvVBAaZIF0QAVi1ekCfAJXr1GGfLtRUXhgrF8c=\"}},\"authorization\":{\"class\":\"solr.RuleBasedAuthorizationPlugin\",\"permissions\":[{\"name\":\"security-edit\",\"role\":\"admin\"}],\"user-role\":{\"solr\":\"admin\"}}}' > /var/solr/data/security.json") + require.NoErrorf(t, err, "%s", err) t.Logf("Output: %s, Error: %s", out, errOut) // Restart solr to apply auth @@ -216,7 +220,7 @@ func setupSolr(t *testing.T, kc *kubernetes.Clientset) { t.Logf("Output: %s, Error: %s", out, errOut) err = checkIfSolrStatusIsReady(t, solrPodName) - assert.NoErrorf(t, err, "%s", err) + require.NoErrorf(t, err, "%s", err) t.Log("--- BasicAuth plugin activated ---") t.Log("--- solr is ready ---") diff --git a/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go b/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go index fbea5e3378b..2b7309776db 100644 --- a/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go +++ b/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go @@ -703,7 +703,6 @@ func testScalerMetricLatency(t *testing.T) { for _, label := range labels { if (*label.Name == labelScaledObject && *label.Value == scaledObjectName) || (*label.Name == labelScaledJob && *label.Value == scaledJobName) { - assert.Equal(t, float64(0), *metric.Gauge.Value) found = true } }