diff --git a/Dockerfile b/Dockerfile
index 859405ca..18fb7de5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.22
+FROM golang:1.23
 
 ENV GOPATH /root/go
 ENV PATH ${PATH}:/root/go/bin
diff --git a/framework/set/provisioning/multiclusters/setMultiCluster.go b/framework/set/provisioning/multiclusters/setMultiCluster.go
index b04fecc6..9df5cfe5 100644
--- a/framework/set/provisioning/multiclusters/setMultiCluster.go
+++ b/framework/set/provisioning/multiclusters/setMultiCluster.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/rancher/shepherd/clients/rancher"
+	"github.com/rancher/shepherd/pkg/config/operations"
 	namegen "github.com/rancher/shepherd/pkg/namegenerator"
 	configuration "github.com/rancher/tfp-automation/config"
 	"github.com/rancher/tfp-automation/defaults/clustertypes"
@@ -24,15 +25,17 @@ import (
 )
 
 // SetMultiCluster is a function that will set multiple cluster configurations in the main.tf file.
-func SetMultiCluster(client *rancher.Client, rancherConfig *rancher.Config, configMap []map[string]any, clusterName string, newFile *hclwrite.File, rootBody *hclwrite.Body, file *os.File, rbacRole configuration.Role, poolName string) error {
+func SetMultiCluster(client *rancher.Client, rancherConfig *rancher.Config, configMap []map[string]any, clusterName string, newFile *hclwrite.File, rootBody *hclwrite.Body, file *os.File, rbacRole configuration.Role, poolName string) ([]string, error) {
 	var err error
+	clusterNames := []string{}
 	customClusterNames := []string{}
 
-	for i, terratestConfig := range configMap {
-
-		terraformConfig := terratestConfig["terraform"].(configuration.TerraformConfig)
-		terratestConfig := terratestConfig["terratest"].(configuration.TerratestConfig)
+	for i, config := range configMap {
+		terraformConfig := new(configuration.TerraformConfig)
+		operations.LoadObjectFromMap(configuration.TerraformConfigurationFileKey, config, terraformConfig)
+		terratestConfig := new(configuration.TerratestConfig)
+		operations.LoadObjectFromMap(configuration.TerratestConfigurationFileKey, config, terratestConfig)
 
 		kubernetesVersion := terratestConfig.KubernetesVersion
 		nodePools := terratestConfig.Nodepools
@@ -45,57 +48,61 @@ func SetMultiCluster(client *rancher.Client, rancherConfig *rancher.Config, conf
 
 		terraformConfig.ClusterName = clusterName
 		poolName = namegen.AppendRandomString(configs.TFP)
+		clusterNames = append(clusterNames, clusterName)
+
 		if terraformConfig.Module == modules.CustomEC2RKE2 || terraformConfig.Module == modules.CustomEC2K3s {
 			customClusterNames = append(customClusterNames, clusterName)
 		}
 
 		switch {
 		case module == clustertypes.AKS:
-			file, err = hosted.SetAKS(&terraformConfig, clusterName, kubernetesVersion, nodePools, newFile, rootBody, file)
+			file, err = hosted.SetAKS(terraformConfig, clusterName, kubernetesVersion, nodePools, newFile, rootBody, file)
 			if err != nil {
-				return err
+				return clusterNames, err
 			}
 		case module == clustertypes.EKS:
-			file, err = hosted.SetEKS(&terraformConfig, clusterName, kubernetesVersion, nodePools, newFile, rootBody, file)
+			file, err = hosted.SetEKS(terraformConfig, clusterName, kubernetesVersion, nodePools, newFile, rootBody, file)
 			if err != nil {
-				return err
+				return clusterNames, err
 			}
 		case module == clustertypes.GKE:
-			file, err = hosted.SetGKE(&terraformConfig, clusterName, kubernetesVersion, nodePools, newFile, rootBody, file)
+			file, err = hosted.SetGKE(terraformConfig, clusterName, kubernetesVersion, nodePools, newFile, rootBody, file)
 			if err != nil {
-				return err
+				return clusterNames, err
 			}
 		case strings.Contains(module, clustertypes.RKE1) && !strings.Contains(module, defaults.Custom):
-			file, err = nodedriver.SetRKE1(&terraformConfig, clusterName, poolName, kubernetesVersion, psact, nodePools,
+			file, err = nodedriver.SetRKE1(terraformConfig, clusterName, poolName, kubernetesVersion, psact, nodePools,
 				snapshotInput, newFile, rootBody, file, rbacRole)
 			if err != nil {
-				return err
+				return clusterNames, err
 			}
 		case (strings.Contains(module, clustertypes.RKE2) || strings.Contains(module, clustertypes.K3S)) && !strings.Contains(module, defaults.Custom):
-			file, err = nodedriverV2.SetRKE2K3s(client, &terraformConfig, clusterName, poolName, kubernetesVersion, psact, nodePools,
+			file, err = nodedriverV2.SetRKE2K3s(client, terraformConfig, clusterName, poolName, kubernetesVersion, psact, nodePools,
 				snapshotInput, newFile, rootBody, file, rbacRole)
 			if err != nil {
-				return err
+				return clusterNames, err
 			}
 		case module == modules.CustomEC2RKE1:
-			file, err = custom.SetCustomRKE1(rancherConfig, &terraformConfig, &terratestConfig, configMap, clusterName, newFile, rootBody, file)
+			file, err = custom.SetCustomRKE1(rancherConfig, terraformConfig, terratestConfig, configMap, clusterName, newFile, rootBody, file)
 			if err != nil {
-				return err
+				return clusterNames, err
 			}
 		case module == modules.CustomEC2RKE2 || module == modules.CustomEC2K3s:
-			file, err = customV2.SetCustomRKE2K3s(rancherConfig, &terraformConfig, &terratestConfig, configMap, clusterName, newFile, rootBody, file)
+			file, err = customV2.SetCustomRKE2K3s(rancherConfig, terraformConfig, terratestConfig, configMap, clusterName, newFile, rootBody, file)
 			if err != nil {
-				return err
+				return clusterNames, err
 			}
 		case module == modules.AirgapRKE2 || module == modules.AirgapK3S:
-			file, err = airgap.SetAirgapRKE2K3s(rancherConfig, &terraformConfig, &terratestConfig, nil, clusterName, newFile, rootBody, file)
-			return err
+			file, err = airgap.SetAirgapRKE2K3s(rancherConfig, terraformConfig, terratestConfig, nil, clusterName, newFile, rootBody, file)
+			if err != nil {
+				return clusterNames, err
+			}
 		default:
 			logrus.Errorf("Unsupported module: %v", module)
 		}
 
 		if i == len(configMap)-1 {
-			file, err = locals.SetLocals(rootBody, &terraformConfig, configMap, clusterName, newFile, file, customClusterNames)
+			file, err = locals.SetLocals(rootBody, terraformConfig, configMap, clusterName, newFile, file, customClusterNames)
 		}
 	}
 
@@ -103,14 +110,14 @@ func SetMultiCluster(client *rancher.Client, rancherConfig *rancher.Config, conf
 	file, err = os.Create(keyPath + configs.MainTF)
 	if err != nil {
 		logrus.Infof("Failed to reset/overwrite main.tf file. Error: %v", err)
-		return err
+		return clusterNames, err
 	}
 
 	_, err = file.Write(newFile.Bytes())
 	if err != nil {
 		logrus.Infof("Failed to write RKE2/K3S configurations to main.tf file. Error: %v", err)
-		return err
+		return clusterNames, err
 	}
 
-	return nil
+	return clusterNames, nil
 }
diff --git a/framework/set/setConfigTF.go b/framework/set/setConfigTF.go
index e8998845..24d41fa3 100644
--- a/framework/set/setConfigTF.go
+++ b/framework/set/setConfigTF.go
@@ -23,16 +23,17 @@ import (
 // ConfigTF is a function that will set the main.tf file based on the module type.
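Note: both SetMultiCluster above and ConfigTF below now return the rendered cluster names alongside the error. A minimal caller sketch, assuming only identifiers that appear in this diff (the wrapper function, its package, and its error handling are illustrative, not part of the patch):

	package example

	import (
		"github.com/rancher/shepherd/clients/rancher"
		"github.com/rancher/tfp-automation/config"
		framework "github.com/rancher/tfp-automation/framework/set"
	)

	// writeMainTF renders main.tf and reports the cluster names that were set up. In the
	// multi-cluster path the names come back from SetMultiCluster; otherwise the slice
	// holds clusterName itself.
	func writeMainTF(client *rancher.Client, rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig,
		terratestConfig *config.TerratestConfig, clusterName, poolName string, configMap []map[string]any) ([]string, error) {
		clusterNames, err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig,
			"", "", clusterName, poolName, "", configMap)
		if err != nil {
			return clusterNames, err
		}

		return clusterNames, nil
	}
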
func ConfigTF(client *rancher.Client, rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, - testUser, testPassword, clusterName, poolName string, rbacRole config.Role, configMap []map[string]any) error { + testUser, testPassword, clusterName, poolName string, rbacRole config.Role, configMap []map[string]any) ([]string, error) { module := terraformConfig.Module + clusterNames := []string{clusterName} var file *os.File keyPath := resources.SetKeyPath() file, err := os.Create(keyPath + configs.MainTF) if err != nil { logrus.Infof("Failed to reset/overwrite main.tf file. Error: %v", err) - return err + return nil, err } defer file.Close() @@ -42,40 +43,40 @@ func ConfigTF(client *rancher.Client, rancherConfig *rancher.Config, terraformCo rootBody.AppendNewline() if terraformConfig.MultiCluster { - err = multiclusters.SetMultiCluster(client, rancherConfig, configMap, clusterName, newFile, rootBody, file, rbacRole, poolName) - return err + clusterNames, err = multiclusters.SetMultiCluster(client, rancherConfig, configMap, clusterName, newFile, rootBody, file, rbacRole, poolName) + return clusterNames, err } else { switch { case module == clustertypes.AKS: _, err = hosted.SetAKS(terraformConfig, clusterName, terratestConfig.KubernetesVersion, terratestConfig.Nodepools, newFile, rootBody, file) - return err + return clusterNames, err case module == clustertypes.EKS: _, err = hosted.SetEKS(terraformConfig, clusterName, terratestConfig.KubernetesVersion, terratestConfig.Nodepools, newFile, rootBody, file) - return err + return clusterNames, err case module == clustertypes.GKE: _, err = hosted.SetGKE(terraformConfig, clusterName, terratestConfig.KubernetesVersion, terratestConfig.Nodepools, newFile, rootBody, file) - return err + return clusterNames, err case strings.Contains(module, clustertypes.RKE1) && !strings.Contains(module, defaults.Custom): _, err = nodedriver.SetRKE1(terraformConfig, clusterName, poolName, terratestConfig.KubernetesVersion, terratestConfig.PSACT, terratestConfig.Nodepools, terratestConfig.SnapshotInput, newFile, rootBody, file, rbacRole) - return err + return clusterNames, err case (strings.Contains(module, clustertypes.RKE2) || strings.Contains(module, clustertypes.K3S)) && !strings.Contains(module, defaults.Custom) && !strings.Contains(module, defaults.Airgap): _, err = nodedriverV2.SetRKE2K3s(client, terraformConfig, clusterName, poolName, terratestConfig.KubernetesVersion, terratestConfig.PSACT, terratestConfig.Nodepools, terratestConfig.SnapshotInput, newFile, rootBody, file, rbacRole) - return err + return clusterNames, err case module == modules.CustomEC2RKE1: _, err = custom.SetCustomRKE1(rancherConfig, terraformConfig, terratestConfig, nil, clusterName, newFile, rootBody, file) - return err + return clusterNames, err case module == modules.CustomEC2RKE2 || module == modules.CustomEC2K3s: _, err = customV2.SetCustomRKE2K3s(rancherConfig, terraformConfig, terratestConfig, nil, clusterName, newFile, rootBody, file) - return err + return clusterNames, err case module == modules.AirgapRKE2 || module == modules.AirgapK3S: _, err = airgap.SetAirgapRKE2K3s(rancherConfig, terraformConfig, terratestConfig, nil, clusterName, newFile, rootBody, file) - return err + return clusterNames, err default: logrus.Errorf("Unsupported module: %v", module) } - return nil + return clusterNames, nil } } diff --git a/go.mod b/go.mod index 3c2d4785..f9e02d56 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module 
github.com/rancher/tfp-automation -go 1.22.0 +go 1.23.0 -toolchain go1.22.3 +toolchain go1.23.4 replace ( github.com/containerd/containerd => github.com/containerd/containerd v1.6.27 // for compatibilty with docker 20.10.x @@ -29,10 +29,10 @@ require ( require ( github.com/antihax/optional v1.0.0 github.com/gruntwork-io/terratest v0.42.0 - github.com/rancher/norman v0.0.0-20241001183610-78a520c160ab - github.com/rancher/rancher v0.0.0-20241111070012-ce59843e7b2b + github.com/rancher/norman v0.5.1 + github.com/rancher/rancher v0.0.0-20250122213954-464e5c27fe8d github.com/rancher/rancher/pkg/apis v0.0.0 - github.com/rancher/shepherd v0.0.0-20241111160715-689bb4ad6d39 + github.com/rancher/shepherd v0.0.0-20250106223550-9350f4861af3 github.com/sirupsen/logrus v1.9.3 ) @@ -92,11 +92,11 @@ require ( github.com/ulikunitz/xz v0.5.10 // indirect github.com/zclconf/go-cty v1.14.1 go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect google.golang.org/api v0.201.0 // indirect google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect google.golang.org/grpc v1.67.1 // indirect @@ -152,9 +152,9 @@ require ( github.com/rancher/aks-operator v1.10.0 // indirect github.com/rancher/apiserver v0.0.0-20241009200134-5a4ecca7b988 // indirect github.com/rancher/eks-operator v1.10.0 // indirect - github.com/rancher/fleet/pkg/apis v0.11.0 // indirect + github.com/rancher/fleet/pkg/apis v0.12.0-alpha.2 // indirect github.com/rancher/gke-operator v1.10.0 // indirect - github.com/rancher/lasso v0.0.0-20240924233157-8f384efc8813 // indirect + github.com/rancher/lasso v0.0.0-20241202185148-04649f379358 // indirect github.com/rancher/rke v1.7.0-rc.5 // indirect github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20240301001845-4eacc2dabbde // indirect github.com/rancher/wrangler v1.1.2 // indirect @@ -164,8 +164,8 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect go.qase.io/client v0.0.0-20231114201952-65195ec001fa go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/term v0.25.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/term v0.27.0 // indirect golang.org/x/time v0.7.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect diff --git a/go.sum b/go.sum index c5139228..51ce9e85 100644 --- a/go.sum +++ b/go.sum @@ -535,24 +535,24 @@ github.com/rancher/apiserver v0.0.0-20241009200134-5a4ecca7b988 h1:e7wP0J4JQdG1F github.com/rancher/apiserver v0.0.0-20241009200134-5a4ecca7b988/go.mod h1:79I0nWWx1+D7lDwHrHxN5O+QzC2EPqJC46KfaMkd14w= github.com/rancher/eks-operator v1.10.0 h1:a3l3nmoIf5EiYS4BQ+a9Z8+0WwZ3duek6gnrT6VZKwk= github.com/rancher/eks-operator v1.10.0/go.mod h1:coW31jIfImAHdGsepc7yCXSuixdclQkJn3y26E9tsss= -github.com/rancher/fleet/pkg/apis v0.11.0 
h1:4OjUfgGdGMQUOHDI8HWN79N9P4U5g9XiPCCbrkZVOMo= -github.com/rancher/fleet/pkg/apis v0.11.0/go.mod h1:8nvuO8x0z7ydpW0eZJEEEPHI0Bmb9T5L3igH0t+0dDk= +github.com/rancher/fleet/pkg/apis v0.12.0-alpha.2 h1:bHgFWuz2vy0uaBBmHbR6xYjJTLaeAuXDUrL8PiYXpxs= +github.com/rancher/fleet/pkg/apis v0.12.0-alpha.2/go.mod h1:kWdjnTs14K8pinSAFb3votOgoEUHhAZ0onLIw9Tv404= github.com/rancher/gke-operator v1.10.0 h1:vV9jLErnH5VRBpK/kCzem8T7/yEDqLVXIcv20Or7e7I= github.com/rancher/gke-operator v1.10.0/go.mod h1:k3oIJMCilpaLHeHPRy90S3pfZ05vbe+b+g1ISiHQbLo= -github.com/rancher/lasso v0.0.0-20240924233157-8f384efc8813 h1:V/LY8pUHZG9Kc+xEDWDOryOnCU6/Q+Lsr9QQEQnshpU= -github.com/rancher/lasso v0.0.0-20240924233157-8f384efc8813/go.mod h1:IxgTBO55lziYhTEETyVKiT8/B5Rg92qYiRmcIIYoPgI= -github.com/rancher/norman v0.0.0-20241001183610-78a520c160ab h1:ihK6See3y/JilqZlc0CG7NXPN+ue5nY9U7xUZUA8M7I= -github.com/rancher/norman v0.0.0-20241001183610-78a520c160ab/go.mod h1:qX/OG/4wY27xSAcSdRilUBxBumV6Ey2CWpAeaKnBQDs= +github.com/rancher/lasso v0.0.0-20241202185148-04649f379358 h1:pJwgJXPt4fi0ysXsJcl28rvxhx/Z/9SNCDwFOEyeGu0= +github.com/rancher/lasso v0.0.0-20241202185148-04649f379358/go.mod h1:IxgTBO55lziYhTEETyVKiT8/B5Rg92qYiRmcIIYoPgI= +github.com/rancher/norman v0.5.1 h1:jbp49IcX2Hn+N2QA3MHdIXeUG0VgCSIjJs4xnqG+j90= +github.com/rancher/norman v0.5.1/go.mod h1:qX/OG/4wY27xSAcSdRilUBxBumV6Ey2CWpAeaKnBQDs= github.com/rancher/qase-go/client v0.0.0-20240308221502-c3b2635212be h1:+m6Jv5sAI4i5NwVYYUccJUK+ecp8eHInkZfMD8BgNjU= github.com/rancher/qase-go/client v0.0.0-20240308221502-c3b2635212be/go.mod h1:NP3xboG+t2p+XMnrcrJ/L384Ki0Cp3Pww/X+vm5Jcy0= -github.com/rancher/rancher v0.0.0-20241111070012-ce59843e7b2b h1:ZKoQb87TdvWlmHnrXewGuErvIhs+32GkRNVz9Hreg5k= -github.com/rancher/rancher v0.0.0-20241111070012-ce59843e7b2b/go.mod h1:zL7BbbqUHVmVT3ESkX/4Jnaqrzt2yD2Vcqsp5u5YGcQ= +github.com/rancher/rancher v0.0.0-20250122213954-464e5c27fe8d h1:3xS1xPR5trYMNeXFXob32/KRylbKz0PVN1Cj8tWP9Tg= +github.com/rancher/rancher v0.0.0-20250122213954-464e5c27fe8d/go.mod h1:9/0ouBe+wb7/kV8jsJgv0CZcP+Eln10cQCTdvLxfvyc= github.com/rancher/rancher/pkg/apis v0.0.0-20240821173544-58647454340d h1:8Jj/FV48AFTbivWiJOk59YPTRYHi4HS1qoLUv+m4uuY= github.com/rancher/rancher/pkg/apis v0.0.0-20240821173544-58647454340d/go.mod h1:aQ7WsDz/lGZlBVr2g5V7N0zViuXnDFdo2TYXFKYDBQ4= github.com/rancher/rke v1.7.0-rc.5 h1:kBRwXTW8CYPXvCcPLISiwGTCvJ8K/+b35D5ES0IcduM= github.com/rancher/rke v1.7.0-rc.5/go.mod h1:+x++Mvl0A3jIzNLiu8nkraqZXiHg6VPWv0Xl4iQCg+A= -github.com/rancher/shepherd v0.0.0-20241111160715-689bb4ad6d39 h1:e1uHXi0pEkGygGPU7t2i397EoSZW8mnMMbMng/rASBk= -github.com/rancher/shepherd v0.0.0-20241111160715-689bb4ad6d39/go.mod h1:J++Mv5VQPMrxeHO3drDJojpo8PXrfib/psOzmjRl7G4= +github.com/rancher/shepherd v0.0.0-20250106223550-9350f4861af3 h1:JyChQ+XKyU/wDHmTzXRdPzZk1YvUd85cHnjoB0pDmeQ= +github.com/rancher/shepherd v0.0.0-20250106223550-9350f4861af3/go.mod h1:urZvZCFSgT+9NVjAV0y8v+pzuqziaS3aYfoMfk9TENw= github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20240301001845-4eacc2dabbde h1:x5VZI/0TUx1MeZirh6e0OMAInhCmq6yRvD6897458Ng= github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20240301001845-4eacc2dabbde/go.mod h1:04o7UUy7ZFiMDEtHEjO1yS7IkO8TcsgjBl93Fcjq7Gg= github.com/rancher/wrangler v1.1.2 
h1:oXbXo9k7y/H4drUpb4RM1c++vT9O3rpoNEfyusGykiU= @@ -650,8 +650,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -736,8 +736,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -780,8 +780,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -847,14 +847,14 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -865,8 +865,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/tests/airgap/airgap_provisioning_test.go b/tests/airgap/airgap_provisioning_test.go index b1044d82..36d65e26 100644 --- a/tests/airgap/airgap_provisioning_test.go +++ b/tests/airgap/airgap_provisioning_test.go @@ -116,8 +116,8 @@ func (a *TfpAirgapProvisioningTestSuite) TestTfpAirgapProvisioning() { keyPath := rancher2.SetKeyPath() defer cleanup.Cleanup(a.T(), a.terraformOptions, keyPath) - provisioning.Provision(a.T(), a.client, a.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions, nil) - provisioning.VerifyCluster(a.T(), a.client, clusterName, &terraformConfig, &terratestConfig) + clusterIDs := provisioning.Provision(a.T(), a.client, a.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions, nil) + provisioning.VerifyClustersState(a.T(), a.client, clusterIDs) }) } @@ -154,11 +154,11 @@ func (a *TfpAirgapProvisioningTestSuite) TestTfpAirgapUpgrading() { keyPath := rancher2.SetKeyPath() defer cleanup.Cleanup(a.T(), a.terraformOptions, keyPath) - provisioning.Provision(a.T(), a.client, a.rancherConfig, &terraformConfig, 
&terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions, nil) - provisioning.VerifyCluster(a.T(), a.client, clusterName, &terraformConfig, &terratestConfig) + clusterIDs := provisioning.Provision(a.T(), a.client, a.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions, nil) + provisioning.VerifyClustersState(a.T(), a.client, clusterIDs) provisioning.KubernetesUpgrade(a.T(), a.client, a.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions) - provisioning.VerifyCluster(a.T(), a.client, clusterName, &terraformConfig, &terratestConfig) + provisioning.VerifyClustersState(a.T(), a.client, clusterIDs) }) } diff --git a/tests/extensions/provisioning/buildModule.go b/tests/extensions/provisioning/buildModule.go index 52c84268..4f7c8f41 100644 --- a/tests/extensions/provisioning/buildModule.go +++ b/tests/extensions/provisioning/buildModule.go @@ -26,7 +26,7 @@ func BuildModule(t *testing.T) error { keyPath := resources.SetKeyPath() - err := framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, "", "", "", "", "", nil) + _, err := framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, "", "", "", "", "", nil) if err != nil { return err } diff --git a/tests/extensions/provisioning/kubernetesUpgrade.go b/tests/extensions/provisioning/kubernetesUpgrade.go index a1f5ef26..84cec0b5 100644 --- a/tests/extensions/provisioning/kubernetesUpgrade.go +++ b/tests/extensions/provisioning/kubernetesUpgrade.go @@ -16,7 +16,7 @@ func KubernetesUpgrade(t *testing.T, client *rancher.Client, rancherConfig *ranc terratestConfig *config.TerratestConfig, testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options) { DefaultUpgradedK8sVersion(t, client, terratestConfig, terraformConfig) - err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) + _, err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) require.NoError(t, err) terraform.Apply(t, terraformOptions) diff --git a/tests/extensions/provisioning/provision.go b/tests/extensions/provisioning/provision.go index dc1a4c0d..e3296ab7 100644 --- a/tests/extensions/provisioning/provision.go +++ b/tests/extensions/provisioning/provision.go @@ -5,6 +5,7 @@ import ( "github.com/gruntwork-io/terratest/modules/terraform" "github.com/rancher/shepherd/clients/rancher" + clusterExtensions "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/tfp-automation/config" framework "github.com/rancher/tfp-automation/framework/set" "github.com/stretchr/testify/require" @@ -12,20 +13,32 @@ import ( // Provision is a function that will run terraform init and apply Terraform resources to provision a cluster. 
func Provision(t *testing.T, client *rancher.Client, rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, - terratestConfig *config.TerratestConfig, testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options, configMap []map[string]any) { + terratestConfig *config.TerratestConfig, testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options, configMap []map[string]any) []string { + var err error + var clusterNames []string + var clusterIDs []string if !terraformConfig.MultiCluster { isSupported := SupportedModules(terraformConfig, terraformOptions, nil) require.True(t, isSupported) - err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) + clusterNames, err = framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) require.NoError(t, err) } else { isSupported := SupportedModules(terraformConfig, terraformOptions, configMap) require.True(t, isSupported) - err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", configMap) + clusterNames, err = framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", configMap) require.NoError(t, err) } terraform.InitAndApply(t, terraformOptions) + + for _, clusterName := range clusterNames { + clusterID, err := clusterExtensions.GetClusterIDByName(client, clusterName) + require.NoError(t, err) + + clusterIDs = append(clusterIDs, clusterID) + } + + return clusterIDs } diff --git a/tests/extensions/provisioning/scale.go b/tests/extensions/provisioning/scale.go index 1a139fd2..3bf2fb84 100644 --- a/tests/extensions/provisioning/scale.go +++ b/tests/extensions/provisioning/scale.go @@ -14,7 +14,7 @@ import ( // cluster, according to user's desired amount. 
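Note: Provision above now resolves every cluster name returned by ConfigTF into a Rancher cluster ID via clusterExtensions.GetClusterIDByName and returns the IDs, which is what the rewritten verification helpers operate on. A condensed sketch of the flow the updated tests in this diff follow; the wrapper function is illustrative and the test-extensions import path is inferred from the file layout:

	package example

	import (
		"testing"

		"github.com/gruntwork-io/terratest/modules/terraform"
		"github.com/rancher/shepherd/clients/rancher"
		"github.com/rancher/tfp-automation/config"
		"github.com/rancher/tfp-automation/tests/extensions/provisioning"
	)

	// provisionAndVerify provisions the cluster(s), then runs the ID-based checks that
	// replace the old name-based VerifyCluster call.
	func provisionAndVerify(t *testing.T, client *rancher.Client, rancherConfig *rancher.Config,
		terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig,
		testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options) []string {
		clusterIDs := provisioning.Provision(t, client, rancherConfig, terraformConfig, terratestConfig,
			testUser, testPassword, clusterName, poolName, terraformOptions, nil)

		provisioning.VerifyClustersState(t, client, clusterIDs)
		provisioning.VerifyWorkloads(t, client, clusterIDs)

		return clusterIDs
	}
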
func Scale(t *testing.T, client *rancher.Client, rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options) { - err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) + _, err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) require.NoError(t, err) terraform.Apply(t, terraformOptions) diff --git a/tests/extensions/provisioning/verify.go b/tests/extensions/provisioning/verify.go index 06ae563b..e008f270 100644 --- a/tests/extensions/provisioning/verify.go +++ b/tests/extensions/provisioning/verify.go @@ -4,62 +4,158 @@ import ( "strings" "testing" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" clusterActions "github.com/rancher/rancher/tests/v2/actions/clusters" "github.com/rancher/rancher/tests/v2/actions/psact" "github.com/rancher/rancher/tests/v2/actions/registries" + "github.com/rancher/rancher/tests/v2/actions/workloads/cronjob" + "github.com/rancher/rancher/tests/v2/actions/workloads/daemonset" + "github.com/rancher/rancher/tests/v2/actions/workloads/deployment" + "github.com/rancher/rancher/tests/v2/actions/workloads/statefulset" "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" clusterExtensions "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/workloads/pods" "github.com/rancher/tfp-automation/config" "github.com/rancher/tfp-automation/defaults/clustertypes" + "github.com/rancher/tfp-automation/defaults/stevetypes" waitState "github.com/rancher/tfp-automation/framework/wait/state" "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// VerifyCluster validates that a downstream cluster and its resources are in a good state, matching a given config. -func VerifyCluster(t *testing.T, client *rancher.Client, clusterName string, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig) { - var expectedKubernetesVersion string - module := terraformConfig.Module - expectedKubernetesVersion = checkExpectedKubernetesVersion(t, terratestConfig, expectedKubernetesVersion, module) +// VerifyClustersState validates that all clusters are active and have no pod errors. 
+func VerifyClustersState(t *testing.T, client *rancher.Client, clusterIDs []string) { + for _, clusterID := range clusterIDs { + cluster, err := client.Management.Cluster.ByID(clusterID) + require.NoError(t, err) - clusterID, err := clusterExtensions.GetClusterIDByName(client, clusterName) - require.NoError(t, err) + logrus.Infof("Waiting for cluster %v to be in an active state...", cluster.Name) + if err := waitState.IsActiveCluster(client, clusterID); err != nil { + require.NoError(t, err) + } + + if err := waitState.AreNodesActive(client, clusterID); err != nil { + require.NoError(t, err) + } + + clusterName, err := clusterExtensions.GetClusterNameByID(client, clusterID) + require.NoError(t, err) + + clusterToken, err := clusterActions.CheckServiceAccountTokenSecret(client, clusterName) + require.NoError(t, err) + require.NotEmpty(t, clusterToken) + + podErrors := pods.StatusPods(client, cluster.ID) + require.Empty(t, podErrors) + + v1ClusterID, err := clusterExtensions.GetV1ProvisioningClusterByName(client, clusterName) + require.NoError(t, err) - logrus.Infof("Waiting for cluster %v to be in an active state...", clusterName) - if err := waitState.IsActiveCluster(client, clusterID); err != nil { + var v1Cluster *v1.SteveAPIObject + if v1ClusterID == "" { + v1Cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + clusterID) + require.NoError(t, err) + require.NotEmpty(t, v1Cluster) + } else { + v1Cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID(v1ClusterID) + require.NoError(t, err) + require.NotEmpty(t, v1Cluster) + } + + clusterObj := new(apisV1.Cluster) + err = v1.ConvertToK8sType(v1Cluster, &clusterObj) require.NoError(t, err) + + if clusterObj.Spec.RKEConfig != nil { + if clusterObj.Spec.RKEConfig.Registries != nil { + for registryURL := range clusterObj.Spec.RKEConfig.Registries.Configs { + _, err := registries.CheckAllClusterPodsForRegistryPrefix(client, clusterID, registryURL) + require.NoError(t, err) + } + } + } + } +} + +// VerifyWorkloads validates that different workload operations and workload types are able to provision successfully +func VerifyWorkloads(t *testing.T, client *rancher.Client, clusterIDs []string) { + workloadValidations := []struct { + name string + validationFunc func(client *rancher.Client, clusterID string) error + }{ + {"WorkloadDeployment", deployment.VerifyCreateDeployment}, + {"WorkloadSideKick", deployment.VerifyCreateDeploymentSideKick}, + {"WorkloadDaemonSet", daemonset.VerifyCreateDaemonSet}, + {"WorkloadCronjob", cronjob.VerifyCreateCronjob}, + {"WorkloadStatefulset", statefulset.VerifyCreateStatefulset}, + {"WorkloadUpgrade", deployment.VerifyDeploymentUpgradeRollback}, + {"WorkloadPodScaleUp", deployment.VerifyDeploymentPodScaleUp}, + {"WorkloadPodScaleDown", deployment.VerifyDeploymentPodScaleDown}, + {"WorkloadPauseOrchestration", deployment.VerifyDeploymentPauseOrchestration}, } - if err := waitState.AreNodesActive(client, clusterID); err != nil { + for _, clusterID := range clusterIDs { + clusterName, err := clusterExtensions.GetClusterNameByID(client, clusterID) require.NoError(t, err) + + logrus.Infof("Validating workloads (%s)", clusterName) + for _, workloadValidation := range workloadValidations { + retries := 3 + for i := 0; i+1 < retries; i++ { + err = workloadValidation.validationFunc(client, clusterID) + if err != nil { + logrus.Info(err) + logrus.Infof("Retry %v / %v", i+1, retries) + continue + } + + break + } + require.NoError(t, err) + } } +} - cluster, err := 
client.Management.Cluster.ByID(clusterID) - require.NoError(t, err) +// VerifyClusterPSACT validates that psact clusters can provision an nginx deployment +func VerifyClusterPSACT(t *testing.T, client *rancher.Client, clusterIDs []string) { + for _, clusterID := range clusterIDs { + cluster, err := client.Management.Cluster.ByID(clusterID) + require.NoError(t, err) - // EKS is formatted this way due to EKS formatting Kubernetes versions with a random string of letters after the version. - if module == clustertypes.EKS { - assert.Equal(t, expectedKubernetesVersion, cluster.Version.GitVersion[1:5]) - } else { - assert.Equal(t, expectedKubernetesVersion, cluster.Version.GitVersion) + psactName := cluster.DefaultPodSecurityAdmissionConfigurationTemplateName + if psactName == string(config.RancherPrivileged) || psactName == string(config.RancherRestricted) { + err := psact.CreateNginxDeployment(client, clusterID, psactName) + require.NoError(t, err) + } } +} - clusterToken, err := clusterActions.CheckServiceAccountTokenSecret(client, cluster.Name) +// VerifyKubernetesVersion validates the expected Kubernetes version. +func VerifyKubernetesVersion(t *testing.T, client *rancher.Client, clusterID, expectedKubernetesVersion, module string) { + cluster, err := client.Management.Cluster.ByID(clusterID) require.NoError(t, err) - assert.NotEmpty(t, clusterToken) - if terratestConfig.PSACT == string(config.RancherPrivileged) || terratestConfig.PSACT == string(config.RancherRestricted) { - require.NotEmpty(t, cluster.DefaultPodSecurityAdmissionConfigurationTemplateName) + switch { + case module == clustertypes.AKS || module == clustertypes.GKE: + expectedKubernetesVersion = `v` + expectedKubernetesVersion + require.Equal(t, expectedKubernetesVersion, cluster.Version.GitVersion) - err := psact.CreateNginxDeployment(client, clusterID, terratestConfig.PSACT) - require.NoError(t, err) - } + // Terraform requires that we input the entire RKE1 version. However, Rancher client clips the `-rancher` suffix. + case strings.Contains(module, clustertypes.RKE1): + expectedKubernetesVersion = expectedKubernetesVersion[:len(expectedKubernetesVersion)-11] + require.Equal(t, expectedKubernetesVersion, cluster.Version.GitVersion) + + case strings.Contains(module, clustertypes.EKS): + require.Equal(t, expectedKubernetesVersion, cluster.Version.GitVersion[1:5]) - podErrors := pods.StatusPods(client, cluster.ID) - assert.Empty(t, podErrors) + default: + logrus.Errorf("Invalid module provided") + } +} +// VerifyRegistry validates that the expected registry is set. +func VerifyRegistry(t *testing.T, client *rancher.Client, clusterID string, terraformConfig *config.TerraformConfig) { if terraformConfig.PrivateRegistries != nil { _, err := registries.CheckAllClusterPodsForRegistryPrefix(client, clusterID, terraformConfig.PrivateRegistries.URL) require.NoError(t, err) @@ -102,22 +198,3 @@ func VerifyNodeCount(t *testing.T, client *rancher.Client, clusterName string, t logrus.Errorf("Unsupported module: %v", module) } } - -// checkExpectedKubernetesVersion is a helper function that verifies the expected Kubernetes version. -func checkExpectedKubernetesVersion(t *testing.T, terratestConfig *config.TerratestConfig, expectedKubernetesVersion, module string) string { - switch { - case module == clustertypes.AKS || module == clustertypes.GKE: - expectedKubernetesVersion = `v` + terratestConfig.KubernetesVersion - // Terraform requires that we input the entire RKE1 version. However, Rancher client clips the `-rancher` suffix. 
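Note: the new VerifyKubernetesVersion above normalizes the expected version per module before comparing it with cluster.Version.GitVersion, and the updated provisioning/upgrade tests later in this diff call it as shown below. The version strings in the comments are examples, not values from this diff:

	// AKS/GKE: terratest supplies e.g. "1.30.5"; a "v" prefix is added before comparing with GitVersion.
	// RKE1:    terratest supplies the full version, e.g. "v1.30.4-rancher1-1"; the 11-character
	//          "-rancher1-1"-style suffix is trimmed because the Rancher client clips it from GitVersion.
	// EKS:     only the "major.minor" slice of GitVersion ([1:5]) is compared, e.g. "1.30".
	provisioning.VerifyKubernetesVersion(t, adminClient, clusterIDs[0], terratestConfig.KubernetesVersion, terraformConfig.Module)
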
- case strings.Contains(module, clustertypes.RKE1): - expectedKubernetesVersion = terratestConfig.KubernetesVersion[:len(terratestConfig.KubernetesVersion)-11] - require.Equal(t, expectedKubernetesVersion, terratestConfig.KubernetesVersion[:len(terratestConfig.KubernetesVersion)-11]) - case strings.Contains(module, clustertypes.EKS) || strings.Contains(module, clustertypes.RKE2) || strings.Contains(module, clustertypes.K3S): - expectedKubernetesVersion = terratestConfig.KubernetesVersion - require.Equal(t, expectedKubernetesVersion, terratestConfig.KubernetesVersion) - default: - logrus.Errorf("Invalid module provided") - } - - return expectedKubernetesVersion -} diff --git a/tests/extensions/rbac/rbac.go b/tests/extensions/rbac/rbac.go index 70d28ede..49ab7f63 100644 --- a/tests/extensions/rbac/rbac.go +++ b/tests/extensions/rbac/rbac.go @@ -14,7 +14,7 @@ import ( func RBAC(t *testing.T, client *rancher.Client, rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options, rbacRole config.Role) { - err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, rbacRole, nil) + _, err := framework.ConfigTF(client, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, rbacRole, nil) require.NoError(t, err) terraform.Apply(t, terraformOptions) diff --git a/tests/rancher2/nodescaling/scale_hosted_test.go b/tests/rancher2/nodescaling/scale_hosted_test.go index b7e6be27..4b932909 100644 --- a/tests/rancher2/nodescaling/scale_hosted_test.go +++ b/tests/rancher2/nodescaling/scale_hosted_test.go @@ -77,8 +77,9 @@ func (s *ScaleHostedTestSuite) TestTfpScaleHosted() { adminClient, err := provisioning.FetchAdminClient(s.T(), s.client) require.NoError(s.T(), err) - provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig) + clusterIDs := provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil) + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) + provisioning.VerifyWorkloads(s.T(), adminClient, clusterIDs) s.terratestConfig.Nodepools = s.terratestConfig.ScalingInput.ScaledUpNodepools @@ -86,7 +87,7 @@ func (s *ScaleHostedTestSuite) TestTfpScaleHosted() { time.Sleep(4 * time.Minute) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig) + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) provisioning.VerifyNodeCount(s.T(), s.client, clusterName, s.terraformConfig, s.terratestConfig.ScalingInput.ScaledUpNodeCount) s.terratestConfig.Nodepools = s.terratestConfig.ScalingInput.ScaledDownNodepools @@ -95,7 +96,7 @@ func (s *ScaleHostedTestSuite) TestTfpScaleHosted() { time.Sleep(4 * time.Minute) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig) + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) provisioning.VerifyNodeCount(s.T(), s.client, clusterName, s.terraformConfig, s.terratestConfig.ScalingInput.ScaledDownNodeCount) }) } diff --git a/tests/rancher2/nodescaling/scale_test.go 
b/tests/rancher2/nodescaling/scale_test.go index 090824a8..c9dc65c8 100644 --- a/tests/rancher2/nodescaling/scale_test.go +++ b/tests/rancher2/nodescaling/scale_test.go @@ -2,6 +2,7 @@ package nodescaling import ( "testing" + "time" "github.com/gruntwork-io/terratest/modules/terraform" "github.com/rancher/shepherd/clients/rancher" @@ -98,19 +99,26 @@ func (s *ScaleTestSuite) TestTfpScale() { adminClient, err := provisioning.FetchAdminClient(s.T(), s.client) require.NoError(s.T(), err) - provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, &terratestConfig) + clusterIDs := provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil) + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) + provisioning.VerifyWorkloads(s.T(), adminClient, clusterIDs) terratestConfig.Nodepools = tt.scaleUpNodeRoles provisioning.Scale(s.T(), s.client, s.rancherConfig, s.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, &terratestConfig) + + time.Sleep(2 * time.Minute) + + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) provisioning.VerifyNodeCount(s.T(), s.client, clusterName, s.terraformConfig, scaledUpCount) terratestConfig.Nodepools = tt.scaleDownNodeRoles provisioning.Scale(s.T(), s.client, s.rancherConfig, s.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, &terratestConfig) + + time.Sleep(2 * time.Minute) + + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) provisioning.VerifyNodeCount(s.T(), s.client, clusterName, s.terraformConfig, scaledDownCount) }) } @@ -139,19 +147,26 @@ func (s *ScaleTestSuite) TestTfpScaleDynamicInput() { adminClient, err := provisioning.FetchAdminClient(s.T(), s.client) require.NoError(s.T(), err) - provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig) + clusterIDs := provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil) + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) + provisioning.VerifyWorkloads(s.T(), adminClient, clusterIDs) s.terratestConfig.Nodepools = s.terratestConfig.ScalingInput.ScaledUpNodepools provisioning.Scale(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig) + + time.Sleep(2 * time.Minute) + + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) provisioning.VerifyNodeCount(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig.ScalingInput.ScaledUpNodeCount) s.terratestConfig.Nodepools = s.terratestConfig.ScalingInput.ScaledDownNodepools provisioning.Scale(s.T(), s.client, s.rancherConfig, 
s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions) - provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig) + + time.Sleep(2 * time.Minute) + + provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs) provisioning.VerifyNodeCount(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig.ScalingInput.ScaledDownNodeCount) }) } diff --git a/tests/rancher2/provisioning/provision_hosted_test.go b/tests/rancher2/provisioning/provision_hosted_test.go index ea16bcf1..268bc684 100644 --- a/tests/rancher2/provisioning/provision_hosted_test.go +++ b/tests/rancher2/provisioning/provision_hosted_test.go @@ -76,8 +76,10 @@ func (p *ProvisionHostedTestSuite) TestTfpProvisionHosted() { adminClient, err := provisioning.FetchAdminClient(p.T(), p.client) require.NoError(p.T(), err) - provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, p.terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) - provisioning.VerifyCluster(p.T(), adminClient, clusterName, p.terraformConfig, p.terratestConfig) + clusterIDs := provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, p.terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) + provisioning.VerifyClustersState(p.T(), adminClient, clusterIDs) + provisioning.VerifyWorkloads(p.T(), adminClient, clusterIDs) + provisioning.VerifyKubernetesVersion(p.T(), adminClient, clusterIDs[0], p.terratestConfig.KubernetesVersion, p.terraformConfig.Module) }) } diff --git a/tests/rancher2/provisioning/provision_test.go b/tests/rancher2/provisioning/provision_test.go index 7bddb349..fcfbc85c 100644 --- a/tests/rancher2/provisioning/provision_test.go +++ b/tests/rancher2/provisioning/provision_test.go @@ -85,8 +85,9 @@ func (p *ProvisionTestSuite) TestTfpProvision() { adminClient, err := provisioning.FetchAdminClient(p.T(), p.client) require.NoError(p.T(), err) - provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) - provisioning.VerifyCluster(p.T(), adminClient, clusterName, p.terraformConfig, &terratestConfig) + clusterIDs := provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) + provisioning.VerifyClustersState(p.T(), adminClient, clusterIDs) + provisioning.VerifyWorkloads(p.T(), adminClient, clusterIDs) }) } @@ -114,8 +115,9 @@ func (p *ProvisionTestSuite) TestTfpProvisionDynamicInput() { adminClient, err := provisioning.FetchAdminClient(p.T(), p.client) require.NoError(p.T(), err) - provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, p.terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) - provisioning.VerifyCluster(p.T(), adminClient, clusterName, p.terraformConfig, p.terratestConfig) + clusterIDs := provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, p.terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) + provisioning.VerifyClustersState(p.T(), adminClient, clusterIDs) + provisioning.VerifyWorkloads(p.T(), adminClient, clusterIDs) }) } diff --git a/tests/rancher2/psact/psact_test.go b/tests/rancher2/psact/psact_test.go index 59f05c16..0447e645 100644 --- a/tests/rancher2/psact/psact_test.go +++ 
b/tests/rancher2/psact/psact_test.go @@ -88,8 +88,9 @@ func (p *PSACTTestSuite) TestTfpPSACT() { adminClient, err := provisioning.FetchAdminClient(p.T(), p.client) require.NoError(p.T(), err) - provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) - provisioning.VerifyCluster(p.T(), adminClient, clusterName, p.terraformConfig, &terratestConfig) + clusterIDs := provisioning.Provision(p.T(), p.client, p.rancherConfig, p.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, p.terraformOptions, nil) + provisioning.VerifyClustersState(p.T(), adminClient, clusterIDs) + provisioning.VerifyClusterPSACT(p.T(), p.client, clusterIDs) }) } diff --git a/tests/rancher2/rbac/rbac_test.go b/tests/rancher2/rbac/rbac_test.go index 3cd5f864..8faba8fd 100644 --- a/tests/rancher2/rbac/rbac_test.go +++ b/tests/rancher2/rbac/rbac_test.go @@ -75,7 +75,7 @@ func (r *RBACTestSuite) TestTfpRBAC() { terratestConfig := *r.terratestConfig terratestConfig.Nodepools = nodeRolesDedicated - tt.name = tt.name + " Module: " + r.terraformConfig.Module + tt.name = tt.name + " Module:" + r.terraformConfig.Module testUser, testPassword, clusterName, poolName := configs.CreateTestCredentials() @@ -86,10 +86,12 @@ func (r *RBACTestSuite) TestTfpRBAC() { adminClient, err := provisioning.FetchAdminClient(r.T(), r.client) require.NoError(r.T(), err) - provisioning.Provision(r.T(), r.client, r.rancherConfig, r.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil) - provisioning.VerifyCluster(r.T(), adminClient, clusterName, r.terraformConfig, &terratestConfig) + clusterIDs := provisioning.Provision(r.T(), r.client, r.rancherConfig, r.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil) + provisioning.VerifyClustersState(r.T(), adminClient, clusterIDs) + provisioning.VerifyWorkloads(r.T(), adminClient, clusterIDs) rb.RBAC(r.T(), r.client, r.rancherConfig, r.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, tt.rbacRole) + provisioning.VerifyClustersState(r.T(), adminClient, clusterIDs) }) } diff --git a/tests/rancher2/snapshot/snapshot.go b/tests/rancher2/snapshot/snapshot.go index a83d7d3f..a03fe45a 100644 --- a/tests/rancher2/snapshot/snapshot.go +++ b/tests/rancher2/snapshot/snapshot.go @@ -96,7 +96,7 @@ func snapshotV2Prov(t *testing.T, client *rancher.Client, rancherConfig *rancher terraformOptions *terraform.Options) (*apisV1.Cluster, string, *steveV1.SteveAPIObject, *steveV1.SteveAPIObject, error) { terratestConfig.SnapshotInput.CreateSnapshot = true - err := framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) + _, err := framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil) require.NoError(t, err) terraform.Apply(t, terraformOptions) @@ -131,7 +131,7 @@ func restoreV2Prov(t *testing.T, client *rancher.Client, rancherConfig *rancher. 
 	terratestConfig.SnapshotInput.RestoreSnapshot = true
 	terratestConfig.SnapshotInput.SnapshotName = snapshotName
 
-	err := framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil)
+	_, err := framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil)
 	require.NoError(t, err)
 
 	terraform.Apply(t, terraformOptions)
@@ -192,7 +192,7 @@ func upgradeCluster(t *testing.T, client *rancher.Client, rancherConfig *rancher
 	terratestConfig.KubernetesVersion = clusterObject.Spec.KubernetesVersion
 	terratestConfig.SnapshotInput.CreateSnapshot = false
 
-	err = framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil)
+	_, err = framework.ConfigTF(nil, rancherConfig, terraformConfig, terratestConfig, testUser, testPassword, clusterName, poolName, "", nil)
 	require.NoError(t, err)
 
 	terraform.Apply(t, terraformOptions)
diff --git a/tests/rancher2/snapshot/snapshot_restore_test.go b/tests/rancher2/snapshot/snapshot_restore_test.go
index 8107ccb4..21803505 100644
--- a/tests/rancher2/snapshot/snapshot_restore_test.go
+++ b/tests/rancher2/snapshot/snapshot_restore_test.go
@@ -97,10 +97,12 @@ func (s *SnapshotRestoreTestSuite) TestTfpSnapshotRestore() {
 		adminClient, err := provisioning.FetchAdminClient(s.T(), s.client)
 		require.NoError(s.T(), err)
 
-		provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil)
-		provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, &terratestConfig)
+		clusterIDs := provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil)
+		provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs)
+		provisioning.VerifyWorkloads(s.T(), adminClient, clusterIDs)
 
 		snapshotRestore(s.T(), s.client, s.rancherConfig, s.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions)
+		provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs)
 
 	})
 }
@@ -132,10 +134,12 @@ func (s *SnapshotRestoreTestSuite) TestTfpSnapshotRestoreDynamicInput() {
 		adminClient, err := provisioning.FetchAdminClient(s.T(), s.client)
 		require.NoError(s.T(), err)
 
-		provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil)
-		provisioning.VerifyCluster(s.T(), adminClient, clusterName, s.terraformConfig, s.terratestConfig)
+		clusterIDs := provisioning.Provision(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions, nil)
+		provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs)
+		provisioning.VerifyWorkloads(s.T(), adminClient, clusterIDs)
 
 		snapshotRestore(s.T(), s.client, s.rancherConfig, s.terraformConfig, s.terratestConfig, testUser, testPassword, clusterName, poolName, s.terraformOptions)
+		provisioning.VerifyClustersState(s.T(), adminClient, clusterIDs)
 
 	})
 }
diff --git a/tests/rancher2/upgrading/kubernetes_hosted_test.go b/tests/rancher2/upgrading/kubernetes_hosted_test.go
index 6fbeeb75..9e150749 100644
--- a/tests/rancher2/upgrading/kubernetes_hosted_test.go
+++ b/tests/rancher2/upgrading/kubernetes_hosted_test.go
@@ -77,14 +77,16 @@ func (k *KubernetesUpgradeHostedTestSuite) TestTfpKubernetesUpgradeHosted() {
 		adminClient, err := provisioning.FetchAdminClient(k.T(), k.client)
 		require.NoError(k.T(), err)
 
-		provisioning.Provision(k.T(), k.client, k.rancherConfig, k.terraformConfig, k.terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions, nil)
-		provisioning.VerifyCluster(k.T(), adminClient, clusterName, k.terraformConfig, k.terratestConfig)
+		clusterIDs := provisioning.Provision(k.T(), k.client, k.rancherConfig, k.terraformConfig, k.terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions, nil)
+		provisioning.VerifyClustersState(k.T(), adminClient, clusterIDs)
+		provisioning.VerifyWorkloads(k.T(), adminClient, clusterIDs)
 
 		provisioning.KubernetesUpgrade(k.T(), k.client, k.rancherConfig, k.terraformConfig, k.terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions)
 
 		time.Sleep(4 * time.Minute)
 
-		provisioning.VerifyCluster(k.T(), adminClient, clusterName, k.terraformConfig, k.terratestConfig)
+		provisioning.VerifyClustersState(k.T(), adminClient, clusterIDs)
+		provisioning.VerifyKubernetesVersion(k.T(), k.client, clusterIDs[0], k.terratestConfig.KubernetesVersion, k.terraformConfig.Module)
 
 	})
 }
diff --git a/tests/rancher2/upgrading/kubernetes_upgrade_test.go b/tests/rancher2/upgrading/kubernetes_upgrade_test.go
index 6946ef73..534f749e 100644
--- a/tests/rancher2/upgrading/kubernetes_upgrade_test.go
+++ b/tests/rancher2/upgrading/kubernetes_upgrade_test.go
@@ -84,11 +84,13 @@ func (k *KubernetesUpgradeTestSuite) TestTfpKubernetesUpgrade() {
 		adminClient, err := provisioning.FetchAdminClient(k.T(), k.client)
 		require.NoError(k.T(), err)
 
-		provisioning.Provision(k.T(), k.client, k.rancherConfig, k.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions, nil)
-		provisioning.VerifyCluster(k.T(), adminClient, clusterName, k.terraformConfig, &terratestConfig)
+		clusterIDs := provisioning.Provision(k.T(), k.client, k.rancherConfig, k.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions, nil)
+		provisioning.VerifyClustersState(k.T(), adminClient, clusterIDs)
+		provisioning.VerifyWorkloads(k.T(), adminClient, clusterIDs)
 
 		provisioning.KubernetesUpgrade(k.T(), k.client, k.rancherConfig, k.terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions)
 
-		provisioning.VerifyCluster(k.T(), adminClient, clusterName, k.terraformConfig, &terratestConfig)
+		provisioning.VerifyClustersState(k.T(), adminClient, clusterIDs)
+		provisioning.VerifyKubernetesVersion(k.T(), k.client, clusterIDs[0], k.terratestConfig.KubernetesVersion, k.terraformConfig.Module)
 	})
 }
@@ -116,11 +118,13 @@ func (k *KubernetesUpgradeTestSuite) TestTfpKubernetesUpgradeDynamicInput() {
 		adminClient, err := provisioning.FetchAdminClient(k.T(), k.client)
 		require.NoError(k.T(), err)
 
-		provisioning.Provision(k.T(), k.client, k.rancherConfig, k.terraformConfig, k.terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions, nil)
-		provisioning.VerifyCluster(k.T(), adminClient, clusterName, k.terraformConfig, k.terratestConfig)
+		clusterIDs := provisioning.Provision(k.T(), k.client, k.rancherConfig, k.terraformConfig, k.terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions, nil)
+		provisioning.VerifyClustersState(k.T(), adminClient, clusterIDs)
+		provisioning.VerifyWorkloads(k.T(), adminClient, clusterIDs)
 
 		provisioning.KubernetesUpgrade(k.T(), k.client, k.rancherConfig, k.terraformConfig, k.terratestConfig, testUser, testPassword, clusterName, poolName, k.terraformOptions)
 
-		provisioning.VerifyCluster(k.T(), adminClient, clusterName, k.terraformConfig, k.terratestConfig)
+		provisioning.VerifyClustersState(k.T(), adminClient, clusterIDs)
+		provisioning.VerifyKubernetesVersion(k.T(), k.client, clusterIDs[0], k.terratestConfig.KubernetesVersion, k.terraformConfig.Module)
 	})
 }
diff --git a/tests/registries/registries_test.go b/tests/registries/registries_test.go
index af71bf77..00ca5565 100644
--- a/tests/registries/registries_test.go
+++ b/tests/registries/registries_test.go
@@ -128,8 +128,10 @@ func (r *TfpRegistriesTestSuite) TestTfpGlobalRegistry() {
 		keyPath := rancher2.SetKeyPath()
 		defer cleanup.Cleanup(r.T(), r.terraformOptions, keyPath)
 
-		provisioning.Provision(r.T(), r.client, r.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil)
-		provisioning.VerifyCluster(r.T(), r.client, clusterName, &terraformConfig, &terratestConfig)
+		clusterIDs := provisioning.Provision(r.T(), r.client, r.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil)
+		provisioning.VerifyClustersState(r.T(), r.client, clusterIDs)
+		provisioning.VerifyRegistry(r.T(), r.client, clusterIDs[0], &terraformConfig)
+		provisioning.VerifyWorkloads(r.T(), r.client, clusterIDs)
 
 	})
 }
@@ -173,8 +175,9 @@ func (r *TfpRegistriesTestSuite) TestTfpAuthenticatedRegistry() {
 		keyPath := rancher2.SetKeyPath()
 		defer cleanup.Cleanup(r.T(), r.terraformOptions, keyPath)
 
-		provisioning.Provision(r.T(), r.client, r.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil)
-		provisioning.VerifyCluster(r.T(), r.client, clusterName, &terraformConfig, &terratestConfig)
+		clusterIDs := provisioning.Provision(r.T(), r.client, r.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil)
+		provisioning.VerifyRegistry(r.T(), r.client, clusterIDs[0], &terraformConfig)
+		provisioning.VerifyWorkloads(r.T(), r.client, clusterIDs)
 
 	})
 }
@@ -220,8 +223,9 @@ func (r *TfpRegistriesTestSuite) TestTfpNonAuthenticatedRegistry() {
 		keyPath := rancher2.SetKeyPath()
 		defer cleanup.Cleanup(r.T(), r.terraformOptions, keyPath)
 
-		provisioning.Provision(r.T(), r.client, r.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil)
-		provisioning.VerifyCluster(r.T(), r.client, clusterName, &terraformConfig, &terratestConfig)
+		clusterIDs := provisioning.Provision(r.T(), r.client, r.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, r.terraformOptions, nil)
+		provisioning.VerifyRegistry(r.T(), r.client, clusterIDs[0], &terraformConfig)
+		provisioning.VerifyWorkloads(r.T(), r.client, clusterIDs)
 
 	})
 }
diff --git a/tests/sanity/tfp_automation_sanity_test.go b/tests/sanity/tfp_automation_sanity_test.go
index db1406f7..cd8c291b 100644
--- a/tests/sanity/tfp_automation_sanity_test.go
+++ b/tests/sanity/tfp_automation_sanity_test.go
@@ -114,8 +114,9 @@ func (t *TfpSanityTestSuite) TestTfpProvisioningSanity() {
 		keyPath := rancher2.SetKeyPath()
 		defer cleanup.Cleanup(t.T(), t.terraformOptions, keyPath)
 
-		provisioning.Provision(t.T(), t.client, t.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, t.terraformOptions, nil)
-		provisioning.VerifyCluster(t.T(), t.client, clusterName, &terraformConfig, &terratestConfig)
+		clusterIDs := provisioning.Provision(t.T(), t.client, t.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, t.terraformOptions, nil)
+		provisioning.VerifyWorkloads(t.T(), t.client, clusterIDs)
+		provisioning.VerifyClustersState(t.T(), t.client, clusterIDs)
 
 	})
 }
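
The hunks above change provisioning.Provision to return the provisioned cluster IDs and replace the single provisioning.VerifyCluster call with ID-based helpers (VerifyClustersState, VerifyWorkloads, VerifyRegistry, VerifyKubernetesVersion). The following is a minimal, illustrative caller sketch of that flow, not part of the patch; the provisioning import path and the parameter types are assumptions inferred from the calls in these hunks rather than confirmed by them.

// Illustrative sketch only. Mirrors the updated test flow in which Provision
// returns the provisioned cluster IDs and verification is keyed on those IDs.
// Import paths and parameter types below are assumed, not taken from the patch.
package sketch

import (
	"testing"

	"github.com/gruntwork-io/terratest/modules/terraform"
	"github.com/rancher/shepherd/clients/rancher"
	config "github.com/rancher/tfp-automation/config"
	"github.com/rancher/tfp-automation/tests/extensions/provisioning" // assumed location of the helpers
)

// provisionAndVerify provisions the configured clusters and runs the new
// per-cluster checks against the IDs that Provision returns.
func provisionAndVerify(t *testing.T, client *rancher.Client, rancherConfig *rancher.Config,
	terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig,
	testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options) {

	// Provision now returns the IDs of every cluster it created.
	clusterIDs := provisioning.Provision(t, client, rancherConfig, terraformConfig, terratestConfig,
		testUser, testPassword, clusterName, poolName, terraformOptions, nil)

	// Verification operates on cluster IDs rather than on a single cluster name.
	provisioning.VerifyClustersState(t, client, clusterIDs)
	provisioning.VerifyWorkloads(t, client, clusterIDs)
}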