From 271b514053abdf9a9dc5587020ef034a02ad36b0 Mon Sep 17 00:00:00 2001 From: Markus Walker Date: Wed, 8 Jan 2025 13:20:30 -0800 Subject: [PATCH] Add initial support for air-gapped environments --- Dockerfile | 2 + build.sh | 2 + config/config.go | 1 + defaults/modules/modules.go | 2 + framework/airgapSetup.go | 30 ++++ framework/cleanup/airgap/cleanup.go | 21 +++ .../cleanup/airgap/cleanupAirgapConfig.go | 48 ++++++ ...tandaloneConfig.go => cleanupRKEConfig.go} | 0 framework/rkeSetup.go | 4 +- framework/sanitySetup.go | 4 +- framework/set/defaults/defaults.go | 81 +++++---- .../airgap/copyScriptToBastion.go | 32 ++++ .../airgap/createRegistrySecret.go | 34 ++++ .../nullresource/setAirgapNullResource.go | 59 +++++++ .../set/provisioning/airgap/register-nodes.sh | 20 +++ .../airgap/registerPrivateNodes.go | 34 ++++ .../set/provisioning/airgap/setConfig.go | 139 ++++++++++++++++ .../custom/instances/setAwsInstances.go | 42 ----- .../provisioning/custom/locals/setLocals.go | 4 +- .../set/provisioning/custom/rke1/setConfig.go | 6 +- .../provisioning/custom/rke2k3s/setConfig.go | 10 +- .../custom/rke2k3s/setRancher2ClusterV2.go | 24 ++- .../multiclusters/setMultiCluster.go | 4 + .../nodedriver/rke2k3s/setConfig.go | 4 +- .../nodedriver/rke2k3s/setPrivateRegistry.go | 17 +- .../resources/airgap/aws/createResources.go | 101 ++++++++++++ .../set/resources/airgap/aws/instances.go | 71 ++++++++ .../set/resources/airgap/createMainTF.go | 81 +++++++++ framework/set/resources/airgap/keyPath.go | 29 ++++ .../set/resources/airgap/rancher/setup.sh | 93 +++++++++++ .../airgap/rancher/setupAirgapRancher.go | 60 +++++++ .../set/resources/airgap/rke2/add-servers.sh | 66 ++++++++ .../set/resources/airgap/rke2/bastion.sh | 44 +++++ .../airgap/rke2/createAirgapCluster.go | 156 ++++++++++++++++++ .../set/resources/airgap/rke2/init-server.sh | 73 ++++++++ .../rancher2/setProvidersAndUsersTF.go | 6 +- .../set/resources/rke/aws/createResources.go | 5 +- .../set/resources/rke/createRKEMainTF.go | 7 +- .../resources/sanity/aws/createResources.go | 23 +-- .../set/resources/sanity/aws/instances.go | 26 ++- .../set/resources/sanity/aws/listeners.go | 31 +++- .../set/resources/sanity/aws/loadbalancer.go | 17 +- .../set/resources/sanity/aws/provider.go | 8 +- framework/set/resources/sanity/aws/route53.go | 27 ++- .../sanity/aws/targetGroupAttachments.go | 32 +++- .../set/resources/sanity/aws/targetGroups.go | 27 ++- .../set/resources/sanity/createMainTF.go | 7 +- .../resources/sanity/rancher/createRancher.go | 3 +- .../set/resources/sanity/rancher/setup.sh | 20 +-- .../resources/sanity/rke2/createCluster.go | 4 +- framework/set/setConfigTF.go | 6 +- modules/airgap/main.tf | 1 + modules/airgap/outputs.tf | 15 ++ tests/airgap/README.md | 110 ++++++++++++ tests/airgap/airgap_provisioning_test.go | 156 ++++++++++++++++++ tests/extensions/provisioning/provision.go | 1 - .../provisioning/supportedModules.go | 2 + tests/rke/rke_provider_test.go | 4 +- tests/sanity/tfp_automation_sanity_test.go | 4 +- 59 files changed, 1776 insertions(+), 164 deletions(-) create mode 100644 framework/airgapSetup.go create mode 100644 framework/cleanup/airgap/cleanup.go create mode 100644 framework/cleanup/airgap/cleanupAirgapConfig.go rename framework/cleanup/rke/{cleanupStandaloneConfig.go => cleanupRKEConfig.go} (100%) create mode 100644 framework/set/provisioning/airgap/copyScriptToBastion.go create mode 100644 framework/set/provisioning/airgap/createRegistrySecret.go create mode 100644 
framework/set/provisioning/airgap/nullresource/setAirgapNullResource.go create mode 100755 framework/set/provisioning/airgap/register-nodes.sh create mode 100644 framework/set/provisioning/airgap/registerPrivateNodes.go create mode 100644 framework/set/provisioning/airgap/setConfig.go delete mode 100644 framework/set/provisioning/custom/instances/setAwsInstances.go create mode 100644 framework/set/resources/airgap/aws/createResources.go create mode 100644 framework/set/resources/airgap/aws/instances.go create mode 100644 framework/set/resources/airgap/createMainTF.go create mode 100644 framework/set/resources/airgap/keyPath.go create mode 100755 framework/set/resources/airgap/rancher/setup.sh create mode 100644 framework/set/resources/airgap/rancher/setupAirgapRancher.go create mode 100755 framework/set/resources/airgap/rke2/add-servers.sh create mode 100755 framework/set/resources/airgap/rke2/bastion.sh create mode 100644 framework/set/resources/airgap/rke2/createAirgapCluster.go create mode 100755 framework/set/resources/airgap/rke2/init-server.sh create mode 100644 modules/airgap/main.tf create mode 100644 modules/airgap/outputs.tf create mode 100644 tests/airgap/README.md create mode 100644 tests/airgap/airgap_provisioning_test.go diff --git a/Dockerfile b/Dockerfile index 801f83fd..1b6ed14f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,6 +24,7 @@ ARG AWS_PROVIDER_VERSION ARG RANCHER2_KEY_PATH ARG RKE_KEY_PATH ARG SANITY_KEY_PATH +ARG AIRGAP_KEY_PATH ENV QASE_TEST_RUN_ID=${QASE_TEST_RUN_ID} ENV TERRAFORM_VERSION=${TERRAFORM_VERSION} @@ -36,6 +37,7 @@ ENV AWS_PROVIDER_VERSION=${AWS_PROVIDER_VERSION} ENV RANCHER2_KEY_PATH=${RANCHER2_KEY_PATH} ENV RKE_KEY_PATH=${RKE_KEY_PATH} ENV SANITY_KEY_PATH=${SANITY_KEY_PATH} +ENV AIRGAP_KEY_PATH=${AIRGAP_KEY_PATH} RUN wget https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip -q && apt-get update > /dev/null && apt-get install unzip > /dev/null && \ unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip > /dev/null && \ diff --git a/build.sh b/build.sh index b82f70cf..78c50da8 100755 --- a/build.sh +++ b/build.sh @@ -14,6 +14,7 @@ AWS_PROVIDER_VERSION="${AWS_PROVIDER_VERSION:-}" RANCHER2_KEY_PATH="${RANCHER2_KEY_PATH:-}" RKE_KEY_PATH="${RKE_KEY_PATH:-}" SANITY_KEY_PATH="${SANITY_KEY_PATH:-}" +AIRGAP_KEY_PATH="${AIRGAP_KEY_PATH:-}" TRIM_JOB_NAME=$(basename "$JOB_NAME") @@ -33,6 +34,7 @@ while [[ 3 -gt $count ]]; do --build-arg RANCHER2_KEY_PATH="$RANCHER2_KEY_PATH" \ --build-arg RKE_KEY_PATH="$RKE_KEY_PATH" \ --build-arg SANITY_KEY_PATH="$SANITY_KEY_PATH" \ + --build-arg AIRGAP_KEY_PATH="$AIRGAP_KEY_PATH" \ --build-arg EXTERNAL_ENCODED_VPN="$EXTERNAL_ENCODED_VPN" \ --build-arg VPN_ENCODED_LOGIN="$VPN_ENCODED_LOGIN" \ -t tfp-automation-validation-"${TRIM_JOB_NAME}""${BUILD_NUMBER}" diff --git a/config/config.go b/config/config.go index 2fa4c6e8..12e647e7 100644 --- a/config/config.go +++ b/config/config.go @@ -101,6 +101,7 @@ type PrivateRegistries struct { } type Standalone struct { + AirgapInternalFQDN string `json:"airgapInternalFQDN,omitempty" yaml:"airgapInternalFQDN,omitempty"` BootstrapPassword string `json:"bootstrapPassword,omitempty" yaml:"bootstrapPassword,omitempty"` CertManagerVersion string `json:"certManagerVersion,omitempty" yaml:"certManagerVersion,omitempty"` RancherChartVersion string `json:"rancherChartVersion,omitempty" yaml:"rancherChartVersion,omitempty"` diff --git a/defaults/modules/modules.go b/defaults/modules/modules.go index a8685a17..3b6a96f6 100644 --- 
a/defaults/modules/modules.go +++ b/defaults/modules/modules.go @@ -17,4 +17,6 @@ const ( VsphereRKE1 = "vsphere_rke1" VsphereRKE2 = "vsphere_rke2" VsphereK3s = "vsphere_k3s" + AirgapRKE2 = "airgap_rke2" + AirgapK3S = "airgap_k3s" ) diff --git a/framework/airgapSetup.go b/framework/airgapSetup.go new file mode 100644 index 00000000..f40f788d --- /dev/null +++ b/framework/airgapSetup.go @@ -0,0 +1,30 @@ +package framework + +import ( + "testing" + + "github.com/gruntwork-io/terratest/modules/logger" + "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/rancher/tfp-automation/config" + resources "github.com/rancher/tfp-automation/framework/set/resources/airgap" +) + +// AirgapSetup is a function that will set the Terraform configuration and return the Terraform options. +func AirgapSetup(t *testing.T, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig) (*terraform.Options, string) { + keyPath := resources.KeyPath() + + var terratestLogger logger.Logger + if terratestConfig.TFLogging { + terratestLogger = *logger.Default + } else { + terratestLogger = *logger.Discard + } + + terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{ + TerraformDir: keyPath, + NoColor: true, + Logger: &terratestLogger, + }) + + return terraformOptions, keyPath +} diff --git a/framework/cleanup/airgap/cleanup.go b/framework/cleanup/airgap/cleanup.go new file mode 100644 index 00000000..bb708f5b --- /dev/null +++ b/framework/cleanup/airgap/cleanup.go @@ -0,0 +1,21 @@ +package airgap + +import ( + "testing" + + "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/tfp-automation/defaults/configs" +) + +// ConfigAirgapCleanup is a function that will run terraform destroy and cleanup Terraform resources. +func ConfigAirgapCleanup(t *testing.T, terraformOptions *terraform.Options) { + rancherConfig := new(rancher.Config) + config.LoadConfig(configs.Rancher, rancherConfig) + + if *rancherConfig.Cleanup { + terraform.Destroy(t, terraformOptions) + ConfigAirgapCleanupTF() + } +} diff --git a/framework/cleanup/airgap/cleanupAirgapConfig.go b/framework/cleanup/airgap/cleanupAirgapConfig.go new file mode 100644 index 00000000..750aca75 --- /dev/null +++ b/framework/cleanup/airgap/cleanupAirgapConfig.go @@ -0,0 +1,48 @@ +package airgap + +import ( + "os" + + "github.com/rancher/tfp-automation/defaults/configs" + resources "github.com/rancher/tfp-automation/framework/set/resources/airgap" + "github.com/sirupsen/logrus" +) + +// ConfigAirgapCleanupTF is a function that will cleanup the main.tf file and terraform.tfstate files. +func ConfigAirgapCleanupTF() error { + keyPath := resources.KeyPath() + + file, err := os.Create(keyPath + configs.MainTF) + if err != nil { + logrus.Errorf("Failed to overwrite main.tf file. Error: %v", err) + return err + } + + defer file.Close() + + _, err = file.WriteString("// Leave blank - main.tf will be set during testing") + if err != nil { + logrus.Errorf("Failed to write to main.tf file. 
Error: %v", err) + return err + } + + delete_files := [3]string{configs.TFState, configs.TFStateBackup, configs.TFLockHCL} + + for _, delete_file := range delete_files { + delete_file = keyPath + delete_file + err = os.Remove(delete_file) + + if err != nil { + logrus.Errorf("Failed to delete terraform.tfstate, terraform.tfstate.backup, and terraform.lock.hcl files. Error: %v", err) + return err + } + } + + err = os.RemoveAll(keyPath + configs.TerraformFolder) + if err != nil { + logrus.Errorf("Failed to delete .terraform folder. Error: %v", err) + return err + } + + return nil +} diff --git a/framework/cleanup/rke/cleanupStandaloneConfig.go b/framework/cleanup/rke/cleanupRKEConfig.go similarity index 100% rename from framework/cleanup/rke/cleanupStandaloneConfig.go rename to framework/cleanup/rke/cleanupRKEConfig.go diff --git a/framework/rkeSetup.go b/framework/rkeSetup.go index b19c8825..bc39e915 100644 --- a/framework/rkeSetup.go +++ b/framework/rkeSetup.go @@ -10,7 +10,7 @@ import ( ) // RKESetup is a function that will set the Terraform configuration and return the Terraform options. -func RKESetup(t *testing.T, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig) *terraform.Options { +func RKESetup(t *testing.T, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig) (*terraform.Options, string) { keyPath := resources.KeyPath() var terratestLogger logger.Logger @@ -26,5 +26,5 @@ func RKESetup(t *testing.T, terraformConfig *config.TerraformConfig, terratestCo Logger: &terratestLogger, }) - return terraformOptions + return terraformOptions, keyPath } diff --git a/framework/sanitySetup.go b/framework/sanitySetup.go index e292f3e6..cdd74151 100644 --- a/framework/sanitySetup.go +++ b/framework/sanitySetup.go @@ -10,7 +10,7 @@ import ( ) // SanitySetup is a function that will set the Terraform configuration and return the Terraform options. 
-func SanitySetup(t *testing.T, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig) *terraform.Options { +func SanitySetup(t *testing.T, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig) (*terraform.Options, string) { keyPath := resources.KeyPath() var terratestLogger logger.Logger @@ -26,5 +26,5 @@ func SanitySetup(t *testing.T, terraformConfig *config.TerraformConfig, terrates Logger: &terratestLogger, }) - return terraformOptions + return terraformOptions, keyPath } diff --git a/framework/set/defaults/defaults.go b/framework/set/defaults/defaults.go index 295d249f..69835810 100644 --- a/framework/set/defaults/defaults.go +++ b/framework/set/defaults/defaults.go @@ -5,12 +5,15 @@ const ( Resource = "resource" ResourceKind = "kind" ResourceName = "name" + Namespace = "namespace" + Triggers = "triggers" DependsOn = "depends_on" GenerateName = "generate_name" Defaults = "defaults" Index = "index" File = "file" + Airgap = "airgap" Custom = "custom" Rancher2Source = "rancher/rancher2" @@ -21,6 +24,7 @@ const ( CloudCredential = "rancher2_cloud_credential" Cluster = "rancher2_cluster" ClusterV2 = "rancher2_cluster_v2" + SecretV2 = "rancher2_secret_v2" RkeConfig = "rke_config" KubernetesVersion = "kubernetes_version" @@ -58,23 +62,25 @@ const ( LogFile = "log_file" - Ami = "ami" - Count = "count" - InstanceType = "instance_type" - SubnetId = "subnet_id" - VpcId = "vpc_id" - VpcSecurityGroupIds = "vpc_security_group_ids" - KeyName = "key_name" - AwsInstance = "aws_instance" - Name = "Name" - Nodes = "nodes" - RootBlockDevice = "root_block_device" - Tags = "tags" - VolumeSize = "volume_size" - Timeout = "timeout" + Ami = "ami" + AssociatePublicIPAddress = "associate_public_ip_address" + Count = "count" + InstanceType = "instance_type" + SubnetId = "subnet_id" + VpcId = "vpc_id" + VpcSecurityGroupIds = "vpc_security_group_ids" + KeyName = "key_name" + AwsInstance = "aws_instance" + Name = "Name" + Nodes = "nodes" + RootBlockDevice = "root_block_device" + Tags = "tags" + VolumeSize = "volume_size" + Timeout = "timeout" Locals = "locals" RoleFlags = "role_flags" + AllFlags = "--etcd --controlplane --worker" EtcdRoleFlag = "--etcd" ControlPlaneRoleFlag = "--controlplane" WorkerRoleFlag = "--worker" @@ -97,6 +103,7 @@ const ( User = "user" Self = "self" PublicIp = "public_ip" + PrivateIp = "private_ip" Length = "length" ApiUrl = "api_url" @@ -120,23 +127,39 @@ const ( TokenKey = "token_key" Version = "version" - DefaultAction = "default_action" - HealthCheck = "health_check" - LoadBalancer = "aws_lb" - LoadBalancerARN = "load_balancer_arn" - LoadBalancerListener = "aws_lb_listener" - LoadBalancerType = "load_balancer_type" - LoadBalancerTargetGroup = "aws_lb_target_group" - LoadBalancerTargetGroupAttachment = "aws_lb_target_group_attachment" - Port = "port" - Route53Record = "aws_route53_record" - Route53Zone = "aws_route53_zone" - Subnets = "subnets" + DefaultAction = "default_action" + HealthCheck = "health_check" + + InternalLoadBalancer = "aws_internal_lb" + LoadBalancer = "aws_lb" + + LoadBalancerARN = "load_balancer_arn" + LoadBalancerInternalListerner = "aws_internal_lb_listener" + LoadBalancerListener = "aws_lb_listener" + + LoadBalancerType = "load_balancer_type" + LoadBalancerTargetGroup = "aws_lb_target_group" + + LoadBalancreInternalTargetGroupAttachment = "aws_internal_lb_target_group_attachment" + LoadBalancerTargetGroupAttachment = "aws_lb_target_group_attachment" + + Port = "port" + Route53InternalRecord = 
"aws_internal_route53_record" + Route53Record = "aws_route53_record" + Route53Zone = "aws_route53_zone" + Subnets = "subnets" + + InternalTargetGroup80Attachment = "aws_internal_tg_attachment_80_server" + InternalTargetGroup443Attachment = "aws_internal_tg_attachment_443_server" + InternalTargetGroup6443Attachment = "aws_internal_tg_attachment_6443_server" + InternalTargetGroup9345Attachment = "aws_internal_tg_attachment_9345_server" TargetGroup80Attachment = "aws_tg_attachment_80_server" TargetGroup443Attachment = "aws_tg_attachment_443_server" TargetGroup6443Attachment = "aws_tg_attachment_6443_server" TargetGroup9345Attachment = "aws_tg_attachment_9345_server" - TargetGroupARN = "target_group_arn" - TargetGroupPrefix = "aws_tg_" - TargetID = "target_id" + + TargetGroupARN = "target_group_arn" + TargetGroupInternalPrefix = "aws_internal_tg_" + TargetGroupPrefix = "aws_tg_" + TargetID = "target_id" ) diff --git a/framework/set/provisioning/airgap/copyScriptToBastion.go b/framework/set/provisioning/airgap/copyScriptToBastion.go new file mode 100644 index 00000000..1f617102 --- /dev/null +++ b/framework/set/provisioning/airgap/copyScriptToBastion.go @@ -0,0 +1,32 @@ +package airgap + +import ( + "os" + "path/filepath" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/rancher/tfp-automation/framework/set/defaults" + "github.com/zclconf/go-cty/cty" +) + +// copyScript is a function that will copy the register-nodes.sh script to the bastion node +func copyScript(provisionerBlockBody *hclwrite.Body) error { + userDir, err := os.UserHomeDir() + if err != nil { + return nil + } + + nodesScriptPath := filepath.Join(userDir, "go/src/github.com/rancher/tfp-automation/framework/set/provisioning/airgap/register-nodes.sh") + + nodesScriptContent, err := os.ReadFile(nodesScriptPath) + if err != nil { + return nil + } + + provisionerBlockBody.SetAttributeValue(defaults.Inline, cty.ListVal([]cty.Value{ + cty.StringVal("echo '" + string(nodesScriptContent) + "' > /tmp/register-nodes.sh"), + cty.StringVal("chmod +x /tmp/register-nodes.sh"), + })) + + return nil +} diff --git a/framework/set/provisioning/airgap/createRegistrySecret.go b/framework/set/provisioning/airgap/createRegistrySecret.go new file mode 100644 index 00000000..42d420c2 --- /dev/null +++ b/framework/set/provisioning/airgap/createRegistrySecret.go @@ -0,0 +1,34 @@ +package airgap + +import ( + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/rancher/tfp-automation/config" + "github.com/rancher/tfp-automation/framework/set/defaults" + "github.com/zclconf/go-cty/cty" +) + +const ( + clusterID = "cluster_id" + localCluster = "local" + namespace = "fleet-default" + password = "password" + secretType = "kubernetes.io/basic-auth" + username = "username" +) + +// createRegistrySecret is a function that will set the airgap RKE2/K3s cluster configurations in the main.tf file. 
+func createRegistrySecret(terraformConfig *config.TerraformConfig, clusterName string, rootBody *hclwrite.Body) { + secretBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.SecretV2, clusterName}) + secretBlockBody := secretBlock.Body() + + secretBlockBody.SetAttributeValue(clusterID, cty.StringVal(localCluster)) + secretBlockBody.SetAttributeValue(defaults.ResourceName, cty.StringVal(terraformConfig.PrivateRegistries.AuthConfigSecretName)) + secretBlockBody.SetAttributeValue(defaults.Namespace, cty.StringVal(namespace)) + secretBlockBody.SetAttributeValue(defaults.Type, cty.StringVal(secretType)) + + dataBlock := secretBlockBody.AppendNewBlock(defaults.Data+" =", nil) + configBlockBody := dataBlock.Body() + + configBlockBody.SetAttributeValue(password, cty.StringVal(terraformConfig.PrivateRegistries.Password)) + configBlockBody.SetAttributeValue(username, cty.StringVal(terraformConfig.PrivateRegistries.Username)) +} diff --git a/framework/set/provisioning/airgap/nullresource/setAirgapNullResource.go b/framework/set/provisioning/airgap/nullresource/setAirgapNullResource.go new file mode 100644 index 00000000..0ea16a4d --- /dev/null +++ b/framework/set/provisioning/airgap/nullresource/setAirgapNullResource.go @@ -0,0 +1,59 @@ +package nullresource + +import ( + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/rancher/tfp-automation/config" + "github.com/rancher/tfp-automation/framework/set/defaults" + "github.com/zclconf/go-cty/cty" +) + +const ( + alwaysRun = "always_run" + bastion = "bastion" +) + +// SetAirgapNullResource is a function that will set the airgap null_resource configurations in the main.tf file, +// to register the nodes to the cluster +func SetAirgapNullResource(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, description string, + dependsOn []string) (*hclwrite.Body, error) { + nullResourceBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.NullResource, description}) + nullResourceBlockBody := nullResourceBlock.Body() + + if len(dependsOn) > 0 { + var dependsOnValue hclwrite.Tokens + for _, dep := range dependsOn { + dependsOnValue = hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(dep)}, + } + } + + nullResourceBlockBody.SetAttributeRaw(defaults.DependsOn, dependsOnValue) + } + + provisionerBlock := nullResourceBlockBody.AppendNewBlock(defaults.Provisioner, []string{defaults.RemoteExec}) + provisionerBlockBody := provisionerBlock.Body() + + connectionBlock := provisionerBlockBody.AppendNewBlock(defaults.Connection, nil) + connectionBlockBody := connectionBlock.Body() + + bastionHostExpression := defaults.AwsInstance + `.` + bastion + `.` + defaults.PublicIp + bastionHost := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(bastionHostExpression)}, + } + + connectionBlockBody.SetAttributeRaw(defaults.Host, bastionHost) + + connectionBlockBody.SetAttributeValue(defaults.Type, cty.StringVal(defaults.Ssh)) + connectionBlockBody.SetAttributeValue(defaults.User, cty.StringVal(terraformConfig.AWSConfig.AWSUser)) + + keyPathExpression := defaults.File + `("` + terraformConfig.PrivateKeyPath + `")` + keyPath := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(keyPathExpression)}, + } + + connectionBlockBody.SetAttributeRaw(defaults.PrivateKey, keyPath) + connectionBlockBody.SetAttributeValue(defaults.Timeout, cty.StringVal(terraformConfig.AWSConfig.Timeout)) + + return provisionerBlockBody, nil +} diff 
--git a/framework/set/provisioning/airgap/register-nodes.sh b/framework/set/provisioning/airgap/register-nodes.sh new file mode 100755 index 00000000..b4ec7eb8 --- /dev/null +++ b/framework/set/provisioning/airgap/register-nodes.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +PEM_FILE=$1 +USER=$2 +GROUP=$3 +BASTION_IP=$4 +NODE_PRIVATE_IP=$5 +REGISTRATION_COMMAND=$6 + +set -e + +echo ${PEM_FILE} | sudo base64 -d > /home/${USER}/airgap.pem +echo "${REGISTRATION_COMMAND}" > /home/${USER}/registration_command.txt +REGISTRATION_COMMAND=$(cat /home/$USER/registration_command.txt) + +PEM=/home/${USER}/airgap.pem +sudo chmod 600 ${PEM} +sudo chown ${USER}:${GROUP} ${PEM} + +ssh -o ProxyCommand="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i $PEM -W %h:%p $USER@$BASTION_IP" -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i $PEM $USER@$NODE_PRIVATE_IP "$REGISTRATION_COMMAND; sleep 10" \ No newline at end of file diff --git a/framework/set/provisioning/airgap/registerPrivateNodes.go b/framework/set/provisioning/airgap/registerPrivateNodes.go new file mode 100644 index 00000000..33c9c23d --- /dev/null +++ b/framework/set/provisioning/airgap/registerPrivateNodes.go @@ -0,0 +1,34 @@ +package airgap + +import ( + "encoding/base64" + "os" + + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/rancher/tfp-automation/config" + "github.com/rancher/tfp-automation/framework/set/defaults" +) + +// registerPrivateNodes is a function that will register the private nodes to the cluster through the bastion node +func registerPrivateNodes(provisionerBlockBody *hclwrite.Body, terraformConfig *config.TerraformConfig, bastionPublicIP, nodePrivateIP, + registrationCommand string) error { + privateKey, err := os.ReadFile(terraformConfig.PrivateKeyPath) + if err != nil { + return err + } + + encodedPEMFile := base64.StdEncoding.EncodeToString([]byte(privateKey)) + + newCommand := `\"` + registrationCommand + `\"` + + provisionerBlockBody.SetAttributeRaw(defaults.Inline, hclwrite.Tokens{ + {Type: hclsyntax.TokenOQuote, Bytes: []byte(`["`), SpacesBefore: 1}, + {Type: hclsyntax.TokenStringLit, Bytes: []byte("/tmp/register-nodes.sh " + encodedPEMFile + " " + + terraformConfig.Standalone.RKE2User + " " + terraformConfig.Standalone.RKE2Group + " " + bastionPublicIP + " " + + nodePrivateIP + " " + newCommand)}, + {Type: hclsyntax.TokenCQuote, Bytes: []byte(`"]`), SpacesBefore: 1}, + }) + + return nil +} diff --git a/framework/set/provisioning/airgap/setConfig.go b/framework/set/provisioning/airgap/setConfig.go new file mode 100644 index 00000000..c507a928 --- /dev/null +++ b/framework/set/provisioning/airgap/setConfig.go @@ -0,0 +1,139 @@ +package airgap + +import ( + "fmt" + "os" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/tfp-automation/config" + "github.com/rancher/tfp-automation/defaults/modules" + "github.com/rancher/tfp-automation/framework/set/defaults" + "github.com/rancher/tfp-automation/framework/set/provisioning/airgap/nullresource" + "github.com/rancher/tfp-automation/framework/set/provisioning/custom/locals" + v2 "github.com/rancher/tfp-automation/framework/set/provisioning/custom/rke2k3s" + airgap "github.com/rancher/tfp-automation/framework/set/resources/airgap/aws" + "github.com/rancher/tfp-automation/framework/set/resources/sanity/aws" + "github.com/sirupsen/logrus" +) + +const ( + 
airgapNodeOne = "airgap_node1" + airgapNodeTwo = "airgap_node2" + airgapNodeThree = "airgap_node3" + bastion = "bastion" + copyScriptToBastion = "copy_script_to_bastion" +) + +// // SetAirgapRKE2K3s is a function that will set the airgap RKE2/K3s cluster configurations in the main.tf file. +func SetAirgapRKE2K3s(rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, + configMap []map[string]any, clusterName string, newFile *hclwrite.File, rootBody *hclwrite.Body, file *os.File) (*os.File, error) { + v2.SetRancher2ClusterV2(rootBody, terraformConfig, terratestConfig, clusterName) + rootBody.AppendNewline() + + createRegistrySecret(terraformConfig, clusterName, rootBody) + rootBody.AppendNewline() + + aws.CreateAWSInstances(rootBody, terraformConfig, terratestConfig, bastion) + rootBody.AppendNewline() + + // Based on GH issue https://github.com/rancher/rancher/issues/45607, K3s clusters will only have one node. + instances := []string{} + if terraformConfig.Module == modules.AirgapRKE2 { + instances = []string{airgapNodeOne, airgapNodeTwo, airgapNodeThree} + } else if terraformConfig.Module == modules.AirgapK3S { + instances = []string{airgapNodeOne} + } + + for _, instance := range instances { + airgap.CreateAirgappedAWSInstances(rootBody, terraformConfig, instance) + rootBody.AppendNewline() + } + + provisionerBlockBody, err := nullresource.SetAirgapNullResource(rootBody, terraformConfig, copyScriptToBastion, nil) + if err != nil { + return nil, err + } + + rootBody.AppendNewline() + + file, _ = locals.SetLocals(rootBody, terraformConfig, configMap, clusterName, newFile, file, nil) + + rootBody.AppendNewline() + + err = copyScript(provisionerBlockBody) + if err != nil { + return nil, err + } + + registrationCommands, nodePrivateIPs := getRegistrationCommands(terraformConfig, clusterName) + + for _, instance := range instances { + var dependsOn []string + + // Depending on the airgapped node, add the specific dependsOn expression. + bastionScriptExpression := "[" + defaults.NullResource + `.copy_script_to_bastion` + "]" + nodeOneExpression := "[" + defaults.NullResource + `.register_` + airgapNodeOne + "]" + nodeTwoExpression := "[" + defaults.NullResource + `.register_` + airgapNodeTwo + "]" + + bastionPublicIP := fmt.Sprintf("${%s.%s.%s}", defaults.AwsInstance, bastion, defaults.PublicIp) + + if instance == airgapNodeOne { + dependsOn = append(dependsOn, bastionScriptExpression) + } else if instance == airgapNodeTwo { + dependsOn = append(dependsOn, nodeOneExpression) + } else if instance == airgapNodeThree { + dependsOn = append(dependsOn, nodeTwoExpression) + } + + provisionerBlockBody, err = nullresource.SetAirgapNullResource(rootBody, terraformConfig, "register_"+instance, dependsOn) + if err != nil { + return nil, err + } + + err = registerPrivateNodes(provisionerBlockBody, terraformConfig, bastionPublicIP, nodePrivateIPs[instance], registrationCommands[instance]) + if err != nil { + return nil, err + } + + rootBody.AppendNewline() + } + + _, err = file.Write(newFile.Bytes()) + if err != nil { + logrus.Infof("Failed to write custom RKE2/K3s configurations to main.tf file. Error: %v", err) + return nil, err + } + + return file, nil +} + +// getRegistrationCommands is a helper function that will return the registration commands for the airgap nodes. 
+func getRegistrationCommands(terraformConfig *config.TerraformConfig, clusterName string) (map[string]string, map[string]string) { + commands := make(map[string]string) + nodePrivateIPs := make(map[string]string) + + etcdRegistrationCommand := fmt.Sprintf("${%s.%s_%s} %s", defaults.Local, clusterName, defaults.InsecureNodeCommand, defaults.EtcdRoleFlag) + controlPlaneRegistrationCommand := fmt.Sprintf("${%s.%s_%s} %s", defaults.Local, clusterName, defaults.InsecureNodeCommand, defaults.ControlPlaneRoleFlag) + workerRegistrationCommand := fmt.Sprintf("${%s.%s_%s} %s", defaults.Local, clusterName, defaults.InsecureNodeCommand, defaults.WorkerRoleFlag) + allRolesRegistrationCommand := fmt.Sprintf("${%s.%s_%s} %s", defaults.Local, clusterName, defaults.InsecureNodeCommand, defaults.AllFlags) + + airgapNodeOnePrivateIP := fmt.Sprintf("${%s.%s.%s}", defaults.AwsInstance, airgapNodeOne, defaults.PrivateIp) + airgapNodeTwoPrivateIP := fmt.Sprintf("${%s.%s.%s}", defaults.AwsInstance, airgapNodeTwo, defaults.PrivateIp) + airgapNodeThreePrivateIP := fmt.Sprintf("${%s.%s.%s}", defaults.AwsInstance, airgapNodeThree, defaults.PrivateIp) + + if terraformConfig.Module == modules.AirgapRKE2 { + commands[airgapNodeOne] = etcdRegistrationCommand + commands[airgapNodeTwo] = controlPlaneRegistrationCommand + commands[airgapNodeThree] = workerRegistrationCommand + + nodePrivateIPs[airgapNodeOne] = airgapNodeOnePrivateIP + nodePrivateIPs[airgapNodeTwo] = airgapNodeTwoPrivateIP + nodePrivateIPs[airgapNodeThree] = airgapNodeThreePrivateIP + } else if terraformConfig.Module == modules.AirgapK3S { + commands[airgapNodeOne] = allRolesRegistrationCommand + nodePrivateIPs[airgapNodeOne] = airgapNodeOnePrivateIP + } + + return commands, nodePrivateIPs +} diff --git a/framework/set/provisioning/custom/instances/setAwsInstances.go b/framework/set/provisioning/custom/instances/setAwsInstances.go deleted file mode 100644 index cf865f0a..00000000 --- a/framework/set/provisioning/custom/instances/setAwsInstances.go +++ /dev/null @@ -1,42 +0,0 @@ -package instances - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/hcl/v2/hclwrite" - "github.com/rancher/tfp-automation/config" - "github.com/rancher/tfp-automation/framework/format" - "github.com/rancher/tfp-automation/framework/set/defaults" - "github.com/zclconf/go-cty/cty" -) - -// SetAwsInstances is a function that will set the AWS instances configurations in the main.tf file. 
-func SetAwsInstances(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, clusterName string) error { - configBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.AwsInstance, clusterName}) - configBlockBody := configBlock.Body() - - configBlockBody.SetAttributeValue(defaults.Count, cty.NumberIntVal(terratestConfig.NodeCount)) - configBlockBody.SetAttributeValue(defaults.Ami, cty.StringVal(terraformConfig.AWSConfig.AMI)) - configBlockBody.SetAttributeValue(defaults.InstanceType, cty.StringVal(terraformConfig.AWSConfig.AWSInstanceType)) - configBlockBody.SetAttributeValue(defaults.SubnetId, cty.StringVal(terraformConfig.AWSConfig.AWSSubnetID)) - awsSecGroupsList := format.ListOfStrings(terraformConfig.AWSConfig.AWSSecurityGroups) - configBlockBody.SetAttributeRaw(defaults.VpcSecurityGroupIds, awsSecGroupsList) - configBlockBody.SetAttributeValue(defaults.KeyName, cty.StringVal(terraformConfig.AWSConfig.AWSKeyName)) - - rootBlockDevice := configBlockBody.AppendNewBlock(defaults.RootBlockDevice, nil) - rootBlockDeviceBody := rootBlockDevice.Body() - rootBlockDeviceBody.SetAttributeValue(defaults.VolumeSize, cty.NumberIntVal(terraformConfig.AWSConfig.AWSRootSize)) - - tagsBlock := configBlockBody.AppendNewBlock(defaults.Tags+" =", nil) - tagsBlockBody := tagsBlock.Body() - - expression := fmt.Sprintf(`"%s-${`+defaults.Count+`.`+defaults.Index+`}"`, terraformConfig.HostnamePrefix) - tags := hclwrite.Tokens{ - {Type: hclsyntax.TokenIdent, Bytes: []byte(expression)}, - } - - tagsBlockBody.SetAttributeRaw(defaults.Name, tags) - - return nil -} diff --git a/framework/set/provisioning/custom/locals/setLocals.go b/framework/set/provisioning/custom/locals/setLocals.go index ddd09e04..cf3f847a 100644 --- a/framework/set/provisioning/custom/locals/setLocals.go +++ b/framework/set/provisioning/custom/locals/setLocals.go @@ -41,10 +41,10 @@ func SetLocals(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, localsBlockBody.SetAttributeRaw(name+"_"+defaults.InsecureNodeCommand, insecureNodeCommand) } - } else { //Temporary workaround until fetching insecure node command is available for rancher2_cluster_v2 resoureces with tfp-rancher2 - if terraformConfig.Module == modules.CustomEC2RKE2 || terraformConfig.Module == modules.CustomEC2K3s { + if terraformConfig.Module == modules.CustomEC2RKE2 || terraformConfig.Module == modules.CustomEC2K3s || + terraformConfig.Module == modules.AirgapRKE2 || terraformConfig.Module == modules.AirgapK3S { originalNodeCommandExpressionClusterV2 := defaults.ClusterV2 + "." + clusterName + "." + defaults.ClusterRegistrationToken + "[0]." 
+ defaults.NodeCommand originalNodeCommand := hclwrite.Tokens{ {Type: hclsyntax.TokenIdent, Bytes: []byte(originalNodeCommandExpressionClusterV2)}, diff --git a/framework/set/provisioning/custom/rke1/setConfig.go b/framework/set/provisioning/custom/rke1/setConfig.go index 0cf7da83..78c00930 100644 --- a/framework/set/provisioning/custom/rke1/setConfig.go +++ b/framework/set/provisioning/custom/rke1/setConfig.go @@ -6,10 +6,10 @@ import ( "github.com/hashicorp/hcl/v2/hclwrite" "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/tfp-automation/config" - "github.com/rancher/tfp-automation/framework/set/provisioning/custom/instances" "github.com/rancher/tfp-automation/framework/set/provisioning/custom/locals" "github.com/rancher/tfp-automation/framework/set/provisioning/custom/nullresource" "github.com/rancher/tfp-automation/framework/set/provisioning/custom/providers" + "github.com/rancher/tfp-automation/framework/set/resources/sanity/aws" "github.com/sirupsen/logrus" ) @@ -17,7 +17,7 @@ import ( func SetCustomRKE1(rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, configMap []map[string]any, clusterName string, newFile *hclwrite.File, rootBody *hclwrite.Body, file *os.File) (*os.File, error) { if terraformConfig.MultiCluster { - instances.SetAwsInstances(rootBody, terraformConfig, terratestConfig, clusterName) + aws.CreateAWSInstances(rootBody, terraformConfig, terratestConfig, clusterName) setRancher2Cluster(rootBody, terraformConfig, clusterName) @@ -25,7 +25,7 @@ func SetCustomRKE1(rancherConfig *rancher.Config, terraformConfig *config.Terraf } else { providers.SetCustomProviders(rancherConfig, terraformConfig) - instances.SetAwsInstances(rootBody, terraformConfig, terratestConfig, clusterName) + aws.CreateAWSInstances(rootBody, terraformConfig, terratestConfig, clusterName) setRancher2Cluster(rootBody, terraformConfig, clusterName) diff --git a/framework/set/provisioning/custom/rke2k3s/setConfig.go b/framework/set/provisioning/custom/rke2k3s/setConfig.go index 0823358a..2d4474b0 100644 --- a/framework/set/provisioning/custom/rke2k3s/setConfig.go +++ b/framework/set/provisioning/custom/rke2k3s/setConfig.go @@ -6,9 +6,9 @@ import ( "github.com/hashicorp/hcl/v2/hclwrite" "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/tfp-automation/config" - "github.com/rancher/tfp-automation/framework/set/provisioning/custom/instances" "github.com/rancher/tfp-automation/framework/set/provisioning/custom/locals" "github.com/rancher/tfp-automation/framework/set/provisioning/custom/nullresource" + "github.com/rancher/tfp-automation/framework/set/resources/sanity/aws" "github.com/sirupsen/logrus" ) @@ -16,15 +16,15 @@ import ( func SetCustomRKE2K3s(rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, configMap []map[string]any, clusterName string, newFile *hclwrite.File, rootBody *hclwrite.Body, file *os.File) (*os.File, error) { if terraformConfig.MultiCluster { - instances.SetAwsInstances(rootBody, terraformConfig, terratestConfig, clusterName) + aws.CreateAWSInstances(rootBody, terraformConfig, terratestConfig, clusterName) - setRancher2ClusterV2(rootBody, terraformConfig, terratestConfig, clusterName) + SetRancher2ClusterV2(rootBody, terraformConfig, terratestConfig, clusterName) nullresource.SetNullResource(rootBody, 
terraformConfig, clusterName) } else { - instances.SetAwsInstances(rootBody, terraformConfig, terratestConfig, clusterName) + aws.CreateAWSInstances(rootBody, terraformConfig, terratestConfig, clusterName) - setRancher2ClusterV2(rootBody, terraformConfig, terratestConfig, clusterName) + SetRancher2ClusterV2(rootBody, terraformConfig, terratestConfig, clusterName) nullresource.SetNullResource(rootBody, terraformConfig, clusterName) diff --git a/framework/set/provisioning/custom/rke2k3s/setRancher2ClusterV2.go b/framework/set/provisioning/custom/rke2k3s/setRancher2ClusterV2.go index 23e9a510..0878ffb0 100644 --- a/framework/set/provisioning/custom/rke2k3s/setRancher2ClusterV2.go +++ b/framework/set/provisioning/custom/rke2k3s/setRancher2ClusterV2.go @@ -1,15 +1,18 @@ package rke2k3s import ( + "strings" + "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/rancher/tfp-automation/config" "github.com/rancher/tfp-automation/framework/set/defaults" + v2 "github.com/rancher/tfp-automation/framework/set/provisioning/nodedriver/rke2k3s" "github.com/zclconf/go-cty/cty" ) -// setRancher2ClusterV2 is a function that will set the rancher2_cluster_v2 configurations in the main.tf file. -func setRancher2ClusterV2(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, clusterName string) error { +// SetRancher2ClusterV2 is a function that will set the rancher2_cluster_v2 configurations in the main.tf file. +func SetRancher2ClusterV2(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, clusterName string) error { rancher2ClusterV2Block := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.ClusterV2, clusterName}) rancher2ClusterV2BlockBody := rancher2ClusterV2Block.Body() @@ -19,10 +22,19 @@ func setRancher2ClusterV2(rootBody *hclwrite.Body, terraformConfig *config.Terra rkeConfigBlock := rancher2ClusterV2BlockBody.AppendNewBlock(defaults.RkeConfig, nil) rkeConfigBlockBody := rkeConfigBlock.Body() - machineGlobalConfigValue := hclwrite.TokensForTraversal(hcl.Traversal{ - hcl.TraverseRoot{Name: "< /tmp/setup.sh"), + cty.StringVal("chmod +x /tmp/setup.sh"), + cty.StringVal(command), + })) + + _, err = file.Write(newFile.Bytes()) + if err != nil { + logrus.Infof("Failed to append configurations to main.tf file. 
Error: %v", err) + return nil, err + } + + return file, nil +} diff --git a/framework/set/resources/airgap/rke2/add-servers.sh b/framework/set/resources/airgap/rke2/add-servers.sh new file mode 100755 index 00000000..5c93d1c3 --- /dev/null +++ b/framework/set/resources/airgap/rke2/add-servers.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +USER=$1 +GROUP=$2 +RKE2_SERVER_ONE_IP=$3 +RKE2_NEW_SERVER_IP=$4 +RKE2_TOKEN=$5 +REGISTRY=$6 +REGISTRY_USERNAME=$7 +REGISTRY_PASSWORD=$8 +PEM_FILE=/home/$USER/airgap.pem + +set -e + +runSSH() { + local server="$1" + local cmd="$2" + + ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i "$PEM_FILE" "$USER@$server" \ + "export USER=${USER}; \ + export GROUP=${GROUP}; \ + export RKE2_SERVER_ONE_IP=${RKE2_SERVER_ONE_IP}; \ + export RKE2_TOKEN=${RKE2_TOKEN}; \ + export REGISTRY=${REGISTRY}; \ + export REGISTRY_USERNAME=${REGISTRY_USERNAME}; \ + export REGISTRY_PASSWORD=${REGISTRY_PASSWORD}; $cmd" +} + +setupConfig() { + sudo mkdir -p /etc/rancher/rke2 + sudo tee /etc/rancher/rke2/config.yaml > /dev/null << EOF +server: https://${RKE2_SERVER_ONE_IP}:9345 +token: ${RKE2_TOKEN} +tls-san: + - ${RKE2_SERVER_ONE_IP} +EOF +} + +setupRegistry() { + sudo mkdir -p /etc/rancher/rke2 + sudo tee /etc/rancher/rke2/registries.yaml > /dev/null << EOF +mirrors: + docker.io: + endpoint: + - "https://${REGISTRY}" +configs: + "${REGISTRY}": + auth: + username: "${REGISTRY_USERNAME}" + password: "${REGISTRY_PASSWORD}" + tls: + insecure_skip_verify: true +EOF +} + +configFunction=$(declare -f setupConfig) +runSSH "${RKE2_NEW_SERVER_IP}" "${configFunction}; setupConfig" + +setupRegistryFunction=$(declare -f setupRegistry) +runSSH "${RKE2_NEW_SERVER_IP}" "${setupRegistryFunction}; setupRegistry" + +runSSH "${RKE2_NEW_SERVER_IP}" "sudo INSTALL_RKE2_ARTIFACT_PATH=/home/${USER} sh install.sh" +runSSH "${RKE2_NEW_SERVER_IP}" "sudo systemctl enable rke2-server" +runSSH "${RKE2_NEW_SERVER_IP}" "sudo systemctl start rke2-server" + +kubectl get nodes \ No newline at end of file diff --git a/framework/set/resources/airgap/rke2/bastion.sh b/framework/set/resources/airgap/rke2/bastion.sh new file mode 100755 index 00000000..7e5de0d3 --- /dev/null +++ b/framework/set/resources/airgap/rke2/bastion.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +K8S_VERSION=$1 +RKE2_SERVER_ONE_IP=$2 +RK2_SERVER_TWO_IP=$3 +RKE2_SERVER_THREE_IP=$4 +USER=$5 +PEM_FILE=$6 + +set -e + +base64 -d <<< $PEM_FILE > /home/$USER/airgap.pem +PEM=/home/$USER/airgap.pem +chmod 600 $PEM + +wget https://github.com/rancher/rke2/releases/download/${K8S_VERSION}%2Brke2r1/rke2.linux-amd64.tar.gz +wget https://github.com/rancher/rke2/releases/download/${K8S_VERSION}%2Brke2r1/rke2-images.linux-amd64.tar.zst +wget https://github.com/rancher/rke2/releases/download/${K8S_VERSION}%2Brke2r1/sha256sum-amd64.txt + +curl -sfL https://get.rke2.io --output install.sh +chmod +x install.sh + +sudo curl -LO https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl +sudo chmod +x kubectl +sudo mv kubectl /usr/local/bin/ + +echo "Copying files to RKE2 server one" +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null /usr/local/bin/kubectl ${USER}@${RKE2_SERVER_ONE_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null install.sh ${USER}@${RKE2_SERVER_ONE_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null rke2.linux-amd64.tar.gz ${USER}@${RKE2_SERVER_ONE_IP}:/home/${USER}/ +sudo 
scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null rke2-images.linux-amd64.tar.zst ${USER}@${RKE2_SERVER_ONE_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null sha256sum-amd64.txt ${USER}@${RKE2_SERVER_ONE_IP}:/home/${USER}/ + +echo "Copying files to RKE2 server two" +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null rke2.linux-amd64.tar.gz ${USER}@${RK2_SERVER_TWO_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null rke2-images.linux-amd64.tar.zst ${USER}@${RK2_SERVER_TWO_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null install.sh ${USER}@${RK2_SERVER_TWO_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null sha256sum-amd64.txt ${USER}@${RK2_SERVER_TWO_IP}:/home/${USER}/ + +echo "Copying files to RKE2 server three" +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null rke2.linux-amd64.tar.gz ${USER}@${RKE2_SERVER_THREE_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null rke2-images.linux-amd64.tar.zst ${USER}@${RKE2_SERVER_THREE_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null install.sh ${USER}@${RKE2_SERVER_THREE_IP}:/home/${USER}/ +sudo scp -i ${PEM} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null sha256sum-amd64.txt ${USER}@${RKE2_SERVER_THREE_IP}:/home/${USER}/ \ No newline at end of file diff --git a/framework/set/resources/airgap/rke2/createAirgapCluster.go b/framework/set/resources/airgap/rke2/createAirgapCluster.go new file mode 100644 index 00000000..c4d4cf18 --- /dev/null +++ b/framework/set/resources/airgap/rke2/createAirgapCluster.go @@ -0,0 +1,156 @@ +package rke2 + +import ( + "encoding/base64" + "os" + "path/filepath" + + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + namegen "github.com/rancher/shepherd/pkg/namegenerator" + "github.com/rancher/tfp-automation/config" + "github.com/rancher/tfp-automation/framework/set/defaults" + "github.com/rancher/tfp-automation/framework/set/resources/sanity/rke2" + "github.com/sirupsen/logrus" + "github.com/zclconf/go-cty/cty" +) + +const ( + rke2Bastion = "rke2_bastion" + rke2ServerOne = "rke2_server1" + rke2ServerTwo = "rke2_server2" + rke2ServerThree = "rke2_server3" + token = "token" +) + +// CreateAirgapRKE2Cluster is a helper function that will create the RKE2 cluster. 
+func CreateAirgapRKE2Cluster(file *os.File, newFile *hclwrite.File, rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, + rke2BastionPublicDNS, rke2ServerOnePrivateIP, rke2ServerTwoPrivateIP, rke2ServerThreePrivateIP string) (*os.File, error) { + userDir, err := os.UserHomeDir() + if err != nil { + return nil, err + } + + bastionScriptPath := filepath.Join(userDir, "go/src/github.com/rancher/tfp-automation/framework/set/resources/airgap/rke2/bastion.sh") + serverScriptPath := filepath.Join(userDir, "go/src/github.com/rancher/tfp-automation/framework/set/resources/airgap/rke2/init-server.sh") + newServersScriptPath := filepath.Join(userDir, "go/src/github.com/rancher/tfp-automation/framework/set/resources/airgap/rke2/add-servers.sh") + + bastionScriptContent, err := os.ReadFile(bastionScriptPath) + if err != nil { + return nil, err + } + + serverOneScriptContent, err := os.ReadFile(serverScriptPath) + if err != nil { + return nil, err + } + + newServersScriptContent, err := os.ReadFile(newServersScriptPath) + if err != nil { + return nil, err + } + + privateKey, err := os.ReadFile(terraformConfig.PrivateKeyPath) + if err != nil { + return nil, err + } + + encodedPEMFile := base64.StdEncoding.EncodeToString([]byte(privateKey)) + + _, provisionerBlockBody := rke2.CreateNullResource(rootBody, terraformConfig, rke2BastionPublicDNS, rke2Bastion) + + provisionerBlockBody.SetAttributeValue(defaults.Inline, cty.ListVal([]cty.Value{ + cty.StringVal("echo '" + string(bastionScriptContent) + "' > /tmp/bastion.sh"), + cty.StringVal("chmod +x /tmp/bastion.sh"), + cty.StringVal("bash -c '/tmp/bastion.sh " + terraformConfig.Standalone.RKE2Version + " " + rke2ServerOnePrivateIP + " " + + rke2ServerTwoPrivateIP + " " + rke2ServerThreePrivateIP + " " + terraformConfig.Standalone.RKE2User + " " + encodedPEMFile + "'"), + })) + + rke2Token := namegen.AppendRandomString(token) + + createAirgappedRKE2Server(rootBody, terraformConfig, rke2BastionPublicDNS, rke2ServerOnePrivateIP, rke2Token, serverOneScriptContent) + addAirgappedRKE2ServerNodes(rootBody, terraformConfig, rke2BastionPublicDNS, rke2ServerOnePrivateIP, rke2ServerTwoPrivateIP, rke2ServerThreePrivateIP, rke2Token, newServersScriptContent) + + _, err = file.Write(newFile.Bytes()) + if err != nil { + logrus.Infof("Failed to append configurations to main.tf file. Error: %v", err) + return nil, err + } + + return file, nil +} + +// CreateNullResource is a helper function that will create the null_resource for the RKE2 cluster. 
+func CreateNullResource(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, instance, host string) (*hclwrite.Body, *hclwrite.Body) { + nullResourceBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.NullResource, host}) + nullResourceBlockBody := nullResourceBlock.Body() + + provisionerBlock := nullResourceBlockBody.AppendNewBlock(defaults.Provisioner, []string{defaults.RemoteExec}) + provisionerBlockBody := provisionerBlock.Body() + + connectionBlock := provisionerBlockBody.AppendNewBlock(defaults.Connection, nil) + connectionBlockBody := connectionBlock.Body() + + connectionBlockBody.SetAttributeValue(defaults.Host, cty.StringVal(instance)) + connectionBlockBody.SetAttributeValue(defaults.Type, cty.StringVal(defaults.Ssh)) + connectionBlockBody.SetAttributeValue(defaults.User, cty.StringVal(terraformConfig.AWSConfig.AWSUser)) + + keyPathExpression := defaults.File + `("` + terraformConfig.PrivateKeyPath + `")` + keyPath := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(keyPathExpression)}, + } + + connectionBlockBody.SetAttributeRaw(defaults.PrivateKey, keyPath) + + rootBody.AppendNewline() + + return nullResourceBlockBody, provisionerBlockBody +} + +// createAirgappedRKE2Server is a helper function that will create the RKE2 server. +func createAirgappedRKE2Server(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, rke2BastionPublicDNS, rke2ServerOnePrivateIP, + rke2Token string, script []byte) { + nullResourceBlockBody, provisionerBlockBody := CreateNullResource(rootBody, terraformConfig, rke2BastionPublicDNS, rke2ServerOne) + + provisionerBlockBody.SetAttributeValue(defaults.Inline, cty.ListVal([]cty.Value{ + cty.StringVal("printf '" + string(script) + "' > /tmp/init-server.sh"), + cty.StringVal("chmod +x /tmp/init-server.sh"), + cty.StringVal("bash -c '/tmp/init-server.sh " + terraformConfig.Standalone.RKE2User + " " + terraformConfig.Standalone.RKE2Group + " " + + rke2ServerOnePrivateIP + " " + rke2Token + " " + terraformConfig.PrivateRegistries.URL + " " + + terraformConfig.PrivateRegistries.Username + " " + terraformConfig.PrivateRegistries.Password + "'"), + })) + + dependsOnServer := `[` + defaults.NullResource + `.` + rke2Bastion + `]` + server := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(dependsOnServer)}, + } + + nullResourceBlockBody.SetAttributeRaw(defaults.DependsOn, server) +} + +// addAirgappedRKE2ServerNodes is a helper function that will add additional RKE2 server nodes to the initial RKE2 airgapped server. 
+func addAirgappedRKE2ServerNodes(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, rke2BastionPublicDNS, rke2ServerOnePrivateIP, rke2ServerTwoPublicDNS, + rke2ServerThreePublicDNS, rke2Token string, script []byte) { + instances := []string{rke2ServerTwoPublicDNS, rke2ServerThreePublicDNS} + hosts := []string{rke2ServerTwo, rke2ServerThree} + + for i, instance := range instances { + host := hosts[i] + nullResourceBlockBody, provisionerBlockBody := CreateNullResource(rootBody, terraformConfig, rke2BastionPublicDNS, host) + + provisionerBlockBody.SetAttributeValue(defaults.Inline, cty.ListVal([]cty.Value{ + cty.StringVal("printf '" + string(script) + "' > /tmp/add-servers.sh"), + cty.StringVal("chmod +x /tmp/add-servers.sh"), + cty.StringVal("bash -c '/tmp/add-servers.sh " + terraformConfig.Standalone.RKE2User + " " + terraformConfig.Standalone.RKE2Group + " " + + rke2ServerOnePrivateIP + " " + instance + " " + rke2Token + " " + terraformConfig.PrivateRegistries.URL + " " + + terraformConfig.PrivateRegistries.Username + " " + terraformConfig.PrivateRegistries.Password + "'"), + })) + + dependsOnServer := `[` + defaults.NullResource + `.` + rke2ServerOne + `]` + server := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(dependsOnServer)}, + } + + nullResourceBlockBody.SetAttributeRaw(defaults.DependsOn, server) + } +} diff --git a/framework/set/resources/airgap/rke2/init-server.sh b/framework/set/resources/airgap/rke2/init-server.sh new file mode 100755 index 00000000..cf14d67f --- /dev/null +++ b/framework/set/resources/airgap/rke2/init-server.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +USER=$1 +GROUP=$2 +RKE2_SERVER_ONE_IP=$3 +RKE2_TOKEN=$4 +REGISTRY=$5 +REGISTRY_USERNAME=$6 +REGISTRY_PASSWORD=$7 +PEM_FILE=/home/$USER/airgap.pem + +set -e + +runSSH() { + local server="$1" + local cmd="$2" + + ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i "$PEM_FILE" "$USER@$server" \ + "export USER=${USER}; \ + export GROUP=${GROUP}; \ + export RKE2_SERVER_ONE_IP=${RKE2_SERVER_ONE_IP}; \ + export RKE2_TOKEN=${RKE2_TOKEN}; \ + export REGISTRY=${REGISTRY}; \ + export REGISTRY_USERNAME=${REGISTRY_USERNAME}; \ + export REGISTRY_PASSWORD=${REGISTRY_PASSWORD}; $cmd" +} + +setupConfig() { + sudo mkdir -p /etc/rancher/rke2 + sudo tee /etc/rancher/rke2/config.yaml > /dev/null << EOF +token: ${RKE2_TOKEN} +tls-san: + - ${RKE2_SERVER_ONE_IP} +EOF +} + +setupRegistry() { + sudo mkdir -p /etc/rancher/rke2 + sudo tee /etc/rancher/rke2/registries.yaml > /dev/null << EOF +mirrors: + docker.io: + endpoint: + - "https://${REGISTRY}" +configs: + "${REGISTRY}": + auth: + username: "${REGISTRY_USERNAME}" + password: "${REGISTRY_PASSWORD}" + tls: + insecure_skip_verify: true +EOF +} + +runSSH "${RKE2_SERVER_ONE_IP}" "sudo mv /home/${USER}/kubectl /usr/local/bin/" + +configFunction=$(declare -f setupConfig) +runSSH "${RKE2_SERVER_ONE_IP}" "${configFunction}; setupConfig" + +setupRegistryFunction=$(declare -f setupRegistry) +runSSH "${RKE2_SERVER_ONE_IP}" "${setupRegistryFunction}; setupRegistry" + +runSSH "${RKE2_SERVER_ONE_IP}" "sudo INSTALL_RKE2_ARTIFACT_PATH=/home/${USER} sh install.sh" +runSSH "${RKE2_SERVER_ONE_IP}" "sudo systemctl enable rke2-server" +runSSH "${RKE2_SERVER_ONE_IP}" "sudo systemctl start rke2-server" + +runSSH "${RKE2_SERVER_ONE_IP}" "sudo mkdir -p /home/${USER}/.kube" +runSSH "${RKE2_SERVER_ONE_IP}" "sudo cp /etc/rancher/rke2/rke2.yaml /home/${USER}/.kube/config" +runSSH "${RKE2_SERVER_ONE_IP}" "sudo chown -R ${USER}:${GROUP} /home/${USER}/.kube" + +mkdir -p ~/.kube 
+ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${PEM_FILE} ${USER}@${RKE2_SERVER_ONE_IP} "sudo cat /home/${USER}/.kube/config" > ~/.kube/config +sed -i "s|server: https://127.0.0.1:6443|server: https://${RKE2_SERVER_ONE_IP}:6443|" ~/.kube/config +kubectl get nodes \ No newline at end of file diff --git a/framework/set/resources/rancher2/setProvidersAndUsersTF.go b/framework/set/resources/rancher2/setProvidersAndUsersTF.go index 3a497ae1..fb920bf0 100644 --- a/framework/set/resources/rancher2/setProvidersAndUsersTF.go +++ b/framework/set/resources/rancher2/setProvidersAndUsersTF.go @@ -68,7 +68,7 @@ func getProviderVersions(terraformConfig *config.TerraformConfig) (string, strin var awsProviderVersion, localProviderVersion string - if strings.Contains(terraformConfig.Module, "custom") || terraformConfig.MultiCluster { + if strings.Contains(terraformConfig.Module, "custom") || strings.Contains(terraformConfig.Module, "airgap") || terraformConfig.MultiCluster { awsProviderVersion = os.Getenv(awsProviderEnvVar) if awsProviderVersion == "" { logrus.Fatalf("Expected env var not set %s", awsProviderEnvVar) @@ -108,7 +108,7 @@ func createRequiredProviders(rootBody *hclwrite.Body, terraformConfig *config.Te } } - if strings.Contains(terraformConfig.Module, defaults.Custom) || customModule { + if strings.Contains(terraformConfig.Module, defaults.Custom) || strings.Contains(terraformConfig.Module, defaults.Airgap) || customModule { reqProvsBlockBody.SetAttributeValue(defaults.Aws, cty.ObjectVal(map[string]cty.Value{ defaults.Source: cty.StringVal(defaults.AwsSource), defaults.Version: cty.StringVal(awsProviderVersion), @@ -125,7 +125,7 @@ func createRequiredProviders(rootBody *hclwrite.Body, terraformConfig *config.Te version: cty.StringVal(providerVersion), })) - if strings.Contains(terraformConfig.Module, defaults.Custom) { + if strings.Contains(terraformConfig.Module, defaults.Custom) || strings.Contains(terraformConfig.Module, defaults.Airgap) { awsProvBlock := rootBody.AppendNewBlock(defaults.Provider, []string{defaults.Aws}) awsProvBlockBody := awsProvBlock.Body() diff --git a/framework/set/resources/rke/aws/createResources.go b/framework/set/resources/rke/aws/createResources.go index 280133d1..04163d4f 100644 --- a/framework/set/resources/rke/aws/createResources.go +++ b/framework/set/resources/rke/aws/createResources.go @@ -21,7 +21,8 @@ const ( ) // CreateAWSResources is a helper function that will create the AWS resources needed for the RKE1 cluster. 
-func CreateAWSResources(file *os.File, newFile *hclwrite.File, tfBlockBody, rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) (*os.File, error) { +func CreateAWSResources(file *os.File, newFile *hclwrite.File, tfBlockBody, rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, + terratestConfig *config.TerratestConfig) (*os.File, error) { createTerraformProviderBlock(tfBlockBody) rootBody.AppendNewline() @@ -33,7 +34,7 @@ func CreateAWSResources(file *os.File, newFile *hclwrite.File, tfBlockBody, root instances := []string{rkeServerOne, rkeServerTwo, rkeServerThree} for _, instance := range instances { - aws.CreateAWSInstances(rootBody, terraformConfig, instance) + aws.CreateAWSInstances(rootBody, terraformConfig, terratestConfig, instance) rootBody.AppendNewline() } diff --git a/framework/set/resources/rke/createRKEMainTF.go b/framework/set/resources/rke/createRKEMainTF.go index a11c6860..f58ac07d 100644 --- a/framework/set/resources/rke/createRKEMainTF.go +++ b/framework/set/resources/rke/createRKEMainTF.go @@ -22,9 +22,8 @@ const ( ) // CreateRKEMainTF is a helper function that will create the main.tf file for creating an RKE1 cluster -func CreateRKEMainTF(t *testing.T, terraformOptions *terraform.Options, terraformConfig *config.TerraformConfig) error { - keyPath := KeyPath() - +func CreateRKEMainTF(t *testing.T, terraformOptions *terraform.Options, keyPath string, terraformConfig *config.TerraformConfig, + terratestConfig *config.TerratestConfig) error { var file *os.File file = resources.OpenFile(file, keyPath) defer file.Close() @@ -35,7 +34,7 @@ func CreateRKEMainTF(t *testing.T, terraformOptions *terraform.Options, terrafor tfBlock := rootBody.AppendNewBlock(terraformConst, nil) tfBlockBody := tfBlock.Body() - file, err := aws.CreateAWSResources(file, newFile, tfBlockBody, rootBody, terraformConfig) + file, err := aws.CreateAWSResources(file, newFile, tfBlockBody, rootBody, terraformConfig, terratestConfig) if err != nil { return err } diff --git a/framework/set/resources/sanity/aws/createResources.go b/framework/set/resources/sanity/aws/createResources.go index a03bf1a9..1b4a0e2d 100644 --- a/framework/set/resources/sanity/aws/createResources.go +++ b/framework/set/resources/sanity/aws/createResources.go @@ -10,8 +10,9 @@ import ( ) // CreateAWSResources is a helper function that will create the AWS resources needed for the RKE2 cluster. 
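+// The helpers below are exported so they can be reused by other resource builders outside of this package.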
-func CreateAWSResources(file *os.File, newFile *hclwrite.File, tfBlockBody, rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) (*os.File, error) { - createTerraformProviderBlock(tfBlockBody) +func CreateAWSResources(file *os.File, newFile *hclwrite.File, tfBlockBody, rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, + terratestConfig *config.TerratestConfig) (*os.File, error) { + CreateTerraformProviderBlock(tfBlockBody) rootBody.AppendNewline() CreateAWSProviderBlock(rootBody, terraformConfig) @@ -19,31 +20,31 @@ func CreateAWSResources(file *os.File, newFile *hclwrite.File, tfBlockBody, root instances := []string{rke2ServerOne, rke2ServerTwo, rke2ServerThree} for _, instance := range instances { - CreateAWSInstances(rootBody, terraformConfig, instance) + CreateAWSInstances(rootBody, terraformConfig, terratestConfig, instance) rootBody.AppendNewline() } - createLocalBlock(rootBody) + CreateLocalBlock(rootBody) rootBody.AppendNewline() ports := []int64{80, 443, 6443, 9345} for _, port := range ports { - createTargetGroupAttachments(rootBody, defaults.LoadBalancerTargetGroupAttachment, getTargetGroupAttachment(port), port) + CreateTargetGroupAttachments(rootBody, defaults.LoadBalancerTargetGroupAttachment, GetTargetGroupAttachment(port), port) rootBody.AppendNewline() } - createLoadBalancer(rootBody, terraformConfig) + CreateLoadBalancer(rootBody, terraformConfig) rootBody.AppendNewline() for _, port := range ports { - createTargetGroups(rootBody, terraformConfig, port) + CreateTargetGroups(rootBody, terraformConfig, port) rootBody.AppendNewline() - createLoadBalancerListeners(rootBody, port) + CreateLoadBalancerListeners(rootBody, port) rootBody.AppendNewline() } - createRoute53Record(rootBody, terraformConfig) + CreateRoute53Record(rootBody, terraformConfig) rootBody.AppendNewline() _, err := file.Write(newFile.Bytes()) @@ -55,8 +56,8 @@ func CreateAWSResources(file *os.File, newFile *hclwrite.File, tfBlockBody, root return file, err } -// getTargetGroupAttachment gets the target group attachment based on the port -func getTargetGroupAttachment(port int64) string { +// GetTargetGroupAttachment gets the target group attachment based on the port +func GetTargetGroupAttachment(port int64) string { switch port { case 80: return defaults.TargetGroup80Attachment diff --git a/framework/set/resources/sanity/aws/instances.go b/framework/set/resources/sanity/aws/instances.go index fbd3d065..f158fb66 100644 --- a/framework/set/resources/sanity/aws/instances.go +++ b/framework/set/resources/sanity/aws/instances.go @@ -11,10 +11,15 @@ import ( ) // CreateAWSInstances is a function that will set the AWS instances configurations in the main.tf file. 
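+// When no standalone config is present, the instance count is taken from terratestConfig.NodeCount and the Name
+// tag is suffixed with the count index, producing a block roughly like:
+//
+//	resource "aws_instance" "rke2_server1" {
+//	  count         = 3
+//	  ami           = "<ami>"
+//	  instance_type = "<instance type>"
+//	  tags = {
+//	    Name = "<hostnamePrefix>-${count.index}"
+//	  }
+//	}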
-func CreateAWSInstances(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, hostnamePrefix string) { +func CreateAWSInstances(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, + hostnamePrefix string) { configBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.AwsInstance, hostnamePrefix}) configBlockBody := configBlock.Body() + if terraformConfig.Standalone == nil { + configBlockBody.SetAttributeValue(defaults.Count, cty.NumberIntVal(terratestConfig.NodeCount)) + } + configBlockBody.SetAttributeValue(defaults.Ami, cty.StringVal(terraformConfig.AWSConfig.AMI)) configBlockBody.SetAttributeValue(defaults.InstanceType, cty.StringVal(terraformConfig.AWSConfig.AWSInstanceType)) configBlockBody.SetAttributeValue(defaults.SubnetId, cty.StringVal(terraformConfig.AWSConfig.AWSSubnetID)) @@ -38,12 +43,21 @@ func CreateAWSInstances(rootBody *hclwrite.Body, terraformConfig *config.Terrafo tagsBlock := configBlockBody.AppendNewBlock(defaults.Tags+" =", nil) tagsBlockBody := tagsBlock.Body() - expression := fmt.Sprintf(`"%s`, terraformConfig.HostnamePrefix+"-"+hostnamePrefix+`"`) - tags := hclwrite.Tokens{ - {Type: hclsyntax.TokenIdent, Bytes: []byte(expression)}, - } + if terraformConfig.Standalone == nil { + expression := fmt.Sprintf(`"%s-${`+defaults.Count+`.`+defaults.Index+`}"`, terraformConfig.HostnamePrefix) + tags := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(expression)}, + } - tagsBlockBody.SetAttributeRaw(defaults.Name, tags) + tagsBlockBody.SetAttributeRaw(defaults.Name, tags) + } else { + expression := fmt.Sprintf(`"%s`, terraformConfig.HostnamePrefix+"-"+hostnamePrefix+`"`) + tags := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(expression)}, + } + + tagsBlockBody.SetAttributeRaw(defaults.Name, tags) + } configBlockBody.AppendNewline() diff --git a/framework/set/resources/sanity/aws/listeners.go b/framework/set/resources/sanity/aws/listeners.go index 5d71fdd2..432533ef 100644 --- a/framework/set/resources/sanity/aws/listeners.go +++ b/framework/set/resources/sanity/aws/listeners.go @@ -15,8 +15,8 @@ const ( targetGroupARN = "target_group_arn" ) -// createLoadBalancerListeners is a function that will set the load balancer listeners configurations in the main.tf file. -func createLoadBalancerListeners(rootBody *hclwrite.Body, port int64) { +// CreateLoadBalancerListeners is a function that will set the load balancer listeners configurations in the main.tf file. +func CreateLoadBalancerListeners(rootBody *hclwrite.Body, port int64) { listenersGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.LoadBalancerListener, defaults.LoadBalancerListener + "_" + strconv.FormatInt(port, 10)}) listenersGroupBlockBody := listenersGroupBlock.Body() @@ -41,3 +41,30 @@ func createLoadBalancerListeners(rootBody *hclwrite.Body, port int64) { defaultActionBlockBody.SetAttributeRaw(targetGroupARN, values) } + +// CreateInternalLoadBalancerListeners is a function that will set the internal load balancer listeners configurations in the main.tf file. +func CreateInternalLoadBalancerListeners(rootBody *hclwrite.Body, port int64) { + listenersGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.LoadBalancerListener, defaults.LoadBalancerInternalListerner + "_" + strconv.FormatInt(port, 10)}) + listenersGroupBlockBody := listenersGroupBlock.Body() + + loadBalancerExpression := defaults.LoadBalancer + "." 
+ defaults.InternalLoadBalancer + ".arn" + values := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(loadBalancerExpression)}, + } + + listenersGroupBlockBody.SetAttributeRaw(loadBalancerARN, values) + listenersGroupBlockBody.SetAttributeValue(defaults.Port, cty.NumberIntVal(port)) + listenersGroupBlockBody.SetAttributeValue(protocol, cty.StringVal(TCP)) + + defaultActionBlock := listenersGroupBlockBody.AppendNewBlock(defaults.DefaultAction, nil) + defaultActionBlockBody := defaultActionBlock.Body() + + defaultActionBlockBody.SetAttributeValue(defaults.Type, cty.StringVal(forward)) + + targetGroupExpression := defaults.LoadBalancerTargetGroup + "." + defaults.TargetGroupInternalPrefix + strconv.FormatInt(port, 10) + ".arn" + values = hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(targetGroupExpression)}, + } + + defaultActionBlockBody.SetAttributeRaw(targetGroupARN, values) +} diff --git a/framework/set/resources/sanity/aws/loadbalancer.go b/framework/set/resources/sanity/aws/loadbalancer.go index 5996acf1..83b02d6b 100644 --- a/framework/set/resources/sanity/aws/loadbalancer.go +++ b/framework/set/resources/sanity/aws/loadbalancer.go @@ -14,8 +14,8 @@ const ( network = "network" ) -// createLoadBalancer is a function that will set the load balancer configurations in the main.tf file. -func createLoadBalancer(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) { +// CreateLoadBalancer is a function that will set the load balancer configurations in the main.tf file. +func CreateLoadBalancer(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) { loadBalancerGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.LoadBalancer, defaults.LoadBalancer}) loadBalancerGroupBodyBlockBody := loadBalancerGroupBlock.Body() @@ -26,3 +26,16 @@ func createLoadBalancer(rootBody *hclwrite.Body, terraformConfig *config.Terrafo loadBalancerGroupBodyBlockBody.SetAttributeRaw(defaults.Subnets, subnetList) loadBalancerGroupBodyBlockBody.SetAttributeValue(name, cty.StringVal(terraformConfig.HostnamePrefix)) } + +// CreateInternalLoadBalancer is a function that will set the internal load balancer configurations in the main.tf file. +func CreateInternalLoadBalancer(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) { + loadBalancerGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.LoadBalancer, defaults.InternalLoadBalancer}) + loadBalancerGroupBodyBlockBody := loadBalancerGroupBlock.Body() + + loadBalancerGroupBodyBlockBody.SetAttributeValue(internal, cty.BoolVal(true)) + loadBalancerGroupBodyBlockBody.SetAttributeValue(defaults.LoadBalancerType, cty.StringVal(network)) + + subnetList := format.ListOfStrings([]string{terraformConfig.AWSConfig.AWSSubnetID}) + loadBalancerGroupBodyBlockBody.SetAttributeRaw(defaults.Subnets, subnetList) + loadBalancerGroupBodyBlockBody.SetAttributeValue(name, cty.StringVal(terraformConfig.HostnamePrefix+"-"+internal)) +} diff --git a/framework/set/resources/sanity/aws/provider.go b/framework/set/resources/sanity/aws/provider.go index fd2c944d..ae06564b 100644 --- a/framework/set/resources/sanity/aws/provider.go +++ b/framework/set/resources/sanity/aws/provider.go @@ -18,8 +18,8 @@ const ( rke2ServerThree = "rke2_server3" ) -// createTerraformProviderBlock will up the terraform block with the required aws provider. -func createTerraformProviderBlock(tfBlockBody *hclwrite.Body) { +// CreateTerraformProviderBlock will up the terraform block with the required aws provider. 
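+// The AWS provider version is read from the AWS_PROVIDER_VERSION environment variable.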
+func CreateTerraformProviderBlock(tfBlockBody *hclwrite.Body) { awsProviderVersion := os.Getenv("AWS_PROVIDER_VERSION") reqProvsBlock := tfBlockBody.AppendNewBlock(requiredProviders, nil) @@ -41,8 +41,8 @@ func CreateAWSProviderBlock(rootBody *hclwrite.Body, terraformConfig *config.Ter awsProvBlockBody.SetAttributeValue(defaults.SecretKey, cty.StringVal(terraformConfig.AWSCredentials.AWSSecretKey)) } -// createLocalBlock will set up the local block. Returns the local block. -func createLocalBlock(rootBody *hclwrite.Body) { +// CreateLocalBlock will set up the local block. Returns the local block. +func CreateLocalBlock(rootBody *hclwrite.Body) { localBlock := rootBody.AppendNewBlock(locals, nil) localBlockBody := localBlock.Body() diff --git a/framework/set/resources/sanity/aws/route53.go b/framework/set/resources/sanity/aws/route53.go index 8720796c..ac872b11 100644 --- a/framework/set/resources/sanity/aws/route53.go +++ b/framework/set/resources/sanity/aws/route53.go @@ -18,8 +18,8 @@ const ( zoneID = "zone_id" ) -// createRoute53Record is a function that will set the AWS Route 53 record configuration in the main.tf file. -func createRoute53Record(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) { +// CreateRoute53Record is a function that will set the AWS Route 53 record configuration in the main.tf file. +func CreateRoute53Record(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) { routeRecordBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.Route53Record, defaults.Route53Record}) routeRecordBlockBody := routeRecordBlock.Body() @@ -48,3 +48,26 @@ func createRoute53Record(rootBody *hclwrite.Body, terraformConfig *config.Terraf zoneBlockBody.SetAttributeValue(name, cty.StringVal(terraformConfig.AWSConfig.AWSRoute53Zone)) zoneBlockBody.SetAttributeValue(privateZone, cty.BoolVal(false)) } + +// CreateRoute53InternalRecord is a function that will set the AWS Route 53 record configuration in the main.tf file. +func CreateRoute53InternalRecord(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig) { + routeRecordBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.Route53Record, defaults.Route53InternalRecord}) + routeRecordBlockBody := routeRecordBlock.Body() + + zoneIDExpression := defaults.Data + "." + defaults.Route53Zone + "." + selected + "." + zoneID + values := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(zoneIDExpression)}, + } + + routeRecordBlockBody.SetAttributeRaw(zoneID, values) + routeRecordBlockBody.SetAttributeValue(name, cty.StringVal(terraformConfig.HostnamePrefix+"-internal")) + routeRecordBlockBody.SetAttributeValue(defaults.Type, cty.StringVal(CNAME)) + routeRecordBlockBody.SetAttributeValue(ttl, cty.NumberIntVal(300)) + + loadBalancerExpression := "[" + defaults.LoadBalancer + "." + defaults.InternalLoadBalancer + "." 
+ dnsName + "]" + values = hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(loadBalancerExpression)}, + } + + routeRecordBlockBody.SetAttributeRaw(records, values) +} diff --git a/framework/set/resources/sanity/aws/targetGroupAttachments.go b/framework/set/resources/sanity/aws/targetGroupAttachments.go index e3c3c06b..c678602c 100644 --- a/framework/set/resources/sanity/aws/targetGroupAttachments.go +++ b/framework/set/resources/sanity/aws/targetGroupAttachments.go @@ -15,8 +15,8 @@ const ( rke2InstanceIDs = "rke2_instance_ids" ) -// createTargetGroupAttachments is a function that will set the target group attachments configurations in the main.tf file. -func createTargetGroupAttachments(rootBody *hclwrite.Body, lbTargetGroupAttachment, targetGroupAttachmentServer string, port int64) { +// CreateTargetGroupAttachments is a function that will set the target group attachments configurations in the main.tf file. +func CreateTargetGroupAttachments(rootBody *hclwrite.Body, lbTargetGroupAttachment, targetGroupAttachmentServer string, port int64) { targetGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{lbTargetGroupAttachment, targetGroupAttachmentServer}) targetGroupBlockBody := targetGroupBlock.Body() @@ -42,3 +42,31 @@ func createTargetGroupAttachments(rootBody *hclwrite.Body, lbTargetGroupAttachme targetGroupBlockBody.SetAttributeRaw(defaults.TargetID, values) targetGroupBlockBody.SetAttributeValue(defaults.Port, cty.NumberIntVal(port)) } + +// CreateInternalTargetGroupAttachments is a function that will set the internal target group attachments configurations in the main.tf file. +func CreateInternalTargetGroupAttachments(rootBody *hclwrite.Body, lbTargetGroupAttachment, targetGroupAttachmentServer string, port int64) { + targetGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{lbTargetGroupAttachment, targetGroupAttachmentServer}) + targetGroupBlockBody := targetGroupBlock.Body() + + instanceValueExpression := defaults.Local + "." + rke2InstanceIDs + values := hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(instanceValueExpression)}, + } + + targetGroupBlockBody.SetAttributeRaw(forEach, values) + + targetGroupExpression := defaults.LoadBalancerTargetGroup + "." + defaults.TargetGroupInternalPrefix + fmt.Sprint(port) + ".arn" + values = hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(targetGroupExpression)}, + } + + targetGroupBlockBody.SetAttributeRaw(defaults.TargetGroupARN, values) + + targetIDExpression := eachValue + values = hclwrite.Tokens{ + {Type: hclsyntax.TokenIdent, Bytes: []byte(targetIDExpression)}, + } + + targetGroupBlockBody.SetAttributeRaw(defaults.TargetID, values) + targetGroupBlockBody.SetAttributeValue(defaults.Port, cty.NumberIntVal(port)) +} diff --git a/framework/set/resources/sanity/aws/targetGroups.go b/framework/set/resources/sanity/aws/targetGroups.go index 6bd656a7..cbd3035b 100644 --- a/framework/set/resources/sanity/aws/targetGroups.go +++ b/framework/set/resources/sanity/aws/targetGroups.go @@ -23,8 +23,8 @@ const ( unhealthyThreshold = "unhealthy_threshold" ) -// createTargetGroups is a function that will set the target group configurations in the main.tf file. -func createTargetGroups(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, port int64) { +// CreateTargetGroups is a function that will set the target group configurations in the main.tf file. 
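+// Each target group health checks its instances over HTTP and treats 200-399 responses as healthy.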
+func CreateTargetGroups(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, port int64) { targetGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.LoadBalancerTargetGroup, defaults.TargetGroupPrefix + strconv.FormatInt(port, 10)}) targetGroupBlockBody := targetGroupBlock.Body() @@ -45,3 +45,26 @@ func createTargetGroups(rootBody *hclwrite.Body, terraformConfig *config.Terrafo healthCheckGroupBlockBody.SetAttributeValue(unhealthyThreshold, cty.NumberIntVal(3)) healthCheckGroupBlockBody.SetAttributeValue(matcher, cty.StringVal("200-399")) } + +// CreateInternalTargetGroups is a function that will set the internal target group configurations in the main.tf file. +func CreateInternalTargetGroups(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, port int64) { + targetGroupBlock := rootBody.AppendNewBlock(defaults.Resource, []string{defaults.LoadBalancerTargetGroup, defaults.TargetGroupInternalPrefix + strconv.FormatInt(port, 10)}) + targetGroupBlockBody := targetGroupBlock.Body() + + targetGroupBlockBody.SetAttributeValue(defaults.Port, cty.NumberIntVal(port)) + targetGroupBlockBody.SetAttributeValue(protocol, cty.StringVal(TCP)) + targetGroupBlockBody.SetAttributeValue(defaults.VpcId, cty.StringVal(terraformConfig.AWSConfig.AWSVpcID)) + targetGroupBlockBody.SetAttributeValue(name, cty.StringVal(terraformConfig.HostnamePrefix+"-internal-tg-"+strconv.FormatInt(port, 10))) + + healthCheckGroupBlock := targetGroupBlockBody.AppendNewBlock(defaults.HealthCheck, nil) + healthCheckGroupBlockBody := healthCheckGroupBlock.Body() + + healthCheckGroupBlockBody.SetAttributeValue(protocol, cty.StringVal(HTTP)) + healthCheckGroupBlockBody.SetAttributeValue(defaults.Port, cty.StringVal(trafficPort)) + healthCheckGroupBlockBody.SetAttributeValue(path, cty.StringVal(ping)) + healthCheckGroupBlockBody.SetAttributeValue(interval, cty.NumberIntVal(10)) + healthCheckGroupBlockBody.SetAttributeValue(timeout, cty.NumberIntVal(6)) + healthCheckGroupBlockBody.SetAttributeValue(healthyThreshold, cty.NumberIntVal(3)) + healthCheckGroupBlockBody.SetAttributeValue(unhealthyThreshold, cty.NumberIntVal(3)) + healthCheckGroupBlockBody.SetAttributeValue(matcher, cty.StringVal("200-399")) +} diff --git a/framework/set/resources/sanity/createMainTF.go b/framework/set/resources/sanity/createMainTF.go index 833e1eb6..7cb46124 100644 --- a/framework/set/resources/sanity/createMainTF.go +++ b/framework/set/resources/sanity/createMainTF.go @@ -26,9 +26,8 @@ const ( ) // CreateMainTF is a helper function that will create the main.tf file for creating a Rancher server. 
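+// keyPath points at the module directory where the generated main.tf is written.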
-func CreateMainTF(t *testing.T, terraformOptions *terraform.Options, terraformConfig *config.TerraformConfig) error { - keyPath := KeyPath() - +func CreateMainTF(t *testing.T, terraformOptions *terraform.Options, keyPath string, terraformConfig *config.TerraformConfig, + terratestConfig *config.TerratestConfig) error { var file *os.File file = OpenFile(file, keyPath) defer file.Close() @@ -39,7 +38,7 @@ func CreateMainTF(t *testing.T, terraformOptions *terraform.Options, terraformCo tfBlock := rootBody.AppendNewBlock(terraformConst, nil) tfBlockBody := tfBlock.Body() - file, err := aws.CreateAWSResources(file, newFile, tfBlockBody, rootBody, terraformConfig) + file, err := aws.CreateAWSResources(file, newFile, tfBlockBody, rootBody, terraformConfig, terratestConfig) if err != nil { return err } diff --git a/framework/set/resources/sanity/rancher/createRancher.go b/framework/set/resources/sanity/rancher/createRancher.go index 1551dea6..5104b20c 100644 --- a/framework/set/resources/sanity/rancher/createRancher.go +++ b/framework/set/resources/sanity/rancher/createRancher.go @@ -32,8 +32,7 @@ func CreateRancher(file *os.File, newFile *hclwrite.File, rootBody *hclwrite.Bod _, provisionerBlockBody := rke2.CreateNullResource(rootBody, terraformConfig, rke2ServerOnePublicDNS, installRancher) - command := "bash -c '/tmp/setup.sh " + terraformConfig.Standalone.RKE2User + " " + terraformConfig.Standalone.RKE2Group + " " + - terraformConfig.Standalone.RancherRepo + " " + terraformConfig.Standalone.RancherChartRepository + " " + + command := "bash -c '/tmp/setup.sh " + terraformConfig.Standalone.RancherRepo + " " + terraformConfig.Standalone.RancherChartRepository + " " + terraformConfig.Standalone.Type + " " + terraformConfig.Standalone.CertManagerVersion + " " + terraformConfig.Standalone.RancherHostname + " " + terraformConfig.Standalone.RancherTagVersion + " " + terraformConfig.Standalone.BootstrapPassword diff --git a/framework/set/resources/sanity/rancher/setup.sh b/framework/set/resources/sanity/rancher/setup.sh index d6dc126a..4e5a5dac 100755 --- a/framework/set/resources/sanity/rancher/setup.sh +++ b/framework/set/resources/sanity/rancher/setup.sh @@ -1,16 +1,14 @@ #!/bin/bash -USER=$1 -GROUP=$2 -RANCHER_REPO=$3 -RANCHER_CHART_REPO=$4 -TYPE=$5 -CERT_MANAGER_VERSION=$6 -HOSTNAME=$7 -RANCHER_TAG_VERSION=$8 -BOOTSTRAP_PASSWORD=$9 -STAGING_RANCHER_IMAGE=${10} -STAGING_RANCHER_AGENT_IMAGE=${11} +RANCHER_REPO=$1 +RANCHER_CHART_REPO=$2 +TYPE=$3 +CERT_MANAGER_VERSION=$4 +HOSTNAME=$5 +RANCHER_TAG_VERSION=$6 +BOOTSTRAP_PASSWORD=$7 +STAGING_RANCHER_IMAGE=${8} +STAGING_RANCHER_AGENT_IMAGE=${9} set -ex diff --git a/framework/set/resources/sanity/rke2/createCluster.go b/framework/set/resources/sanity/rke2/createCluster.go index aa07f57b..c103a0a3 100644 --- a/framework/set/resources/sanity/rke2/createCluster.go +++ b/framework/set/resources/sanity/rke2/createCluster.go @@ -84,7 +84,7 @@ func CreateNullResource(rootBody *hclwrite.Body, terraformConfig *config.Terrafo // createRKE2Server is a helper function that will create the RKE2 server. 
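+// The server is bootstrapped over SSH by copying init-server.sh to the node and running it with the RKE2 user, group, version, private IP, and token.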
func createRKE2Server(rootBody *hclwrite.Body, terraformConfig *config.TerraformConfig, rke2ServerOnePublicDNS, rke2ServerOnePrivateIP, - rke2Token string, script []byte) error { + rke2Token string, script []byte) { _, provisionerBlockBody := CreateNullResource(rootBody, terraformConfig, rke2ServerOnePublicDNS, rke2ServerOne) provisionerBlockBody.SetAttributeValue(defaults.Inline, cty.ListVal([]cty.Value{ @@ -93,8 +93,6 @@ func createRKE2Server(rootBody *hclwrite.Body, terraformConfig *config.Terraform cty.StringVal("bash -c '/tmp/init-server.sh " + terraformConfig.Standalone.RKE2User + " " + terraformConfig.Standalone.RKE2Group + " " + terraformConfig.Standalone.RKE2Version + " " + rke2ServerOnePrivateIP + " " + rke2Token + "'"), })) - - return nil } // addRKE2ServerNodes is a helper function that will add additional RKE2 server nodes to the initial RKE2 server. diff --git a/framework/set/setConfigTF.go b/framework/set/setConfigTF.go index e12a2b97..e8998845 100644 --- a/framework/set/setConfigTF.go +++ b/framework/set/setConfigTF.go @@ -10,6 +10,7 @@ import ( "github.com/rancher/tfp-automation/defaults/configs" "github.com/rancher/tfp-automation/defaults/modules" "github.com/rancher/tfp-automation/framework/set/defaults" + "github.com/rancher/tfp-automation/framework/set/provisioning/airgap" custom "github.com/rancher/tfp-automation/framework/set/provisioning/custom/rke1" customV2 "github.com/rancher/tfp-automation/framework/set/provisioning/custom/rke2k3s" "github.com/rancher/tfp-automation/framework/set/provisioning/hosted" @@ -58,7 +59,7 @@ func ConfigTF(client *rancher.Client, rancherConfig *rancher.Config, terraformCo _, err = nodedriver.SetRKE1(terraformConfig, clusterName, poolName, terratestConfig.KubernetesVersion, terratestConfig.PSACT, terratestConfig.Nodepools, terratestConfig.SnapshotInput, newFile, rootBody, file, rbacRole) return err - case (strings.Contains(module, clustertypes.RKE2) || strings.Contains(module, clustertypes.K3S)) && !strings.Contains(module, defaults.Custom): + case (strings.Contains(module, clustertypes.RKE2) || strings.Contains(module, clustertypes.K3S)) && !strings.Contains(module, defaults.Custom) && !strings.Contains(module, defaults.Airgap): _, err = nodedriverV2.SetRKE2K3s(client, terraformConfig, clusterName, poolName, terratestConfig.KubernetesVersion, terratestConfig.PSACT, terratestConfig.Nodepools, terratestConfig.SnapshotInput, newFile, rootBody, file, rbacRole) return err @@ -68,6 +69,9 @@ func ConfigTF(client *rancher.Client, rancherConfig *rancher.Config, terraformCo case module == modules.CustomEC2RKE2 || module == modules.CustomEC2K3s: _, err = customV2.SetCustomRKE2K3s(rancherConfig, terraformConfig, terratestConfig, nil, clusterName, newFile, rootBody, file) return err + case module == modules.AirgapRKE2 || module == modules.AirgapK3S: + _, err = airgap.SetAirgapRKE2K3s(rancherConfig, terraformConfig, terratestConfig, nil, clusterName, newFile, rootBody, file) + return err default: logrus.Errorf("Unsupported module: %v", module) } diff --git a/modules/airgap/main.tf b/modules/airgap/main.tf new file mode 100644 index 00000000..6ffc8ec0 --- /dev/null +++ b/modules/airgap/main.tf @@ -0,0 +1 @@ +// Leave blank - main.tf will be set during testing \ No newline at end of file diff --git a/modules/airgap/outputs.tf b/modules/airgap/outputs.tf new file mode 100644 index 00000000..e51e95a1 --- /dev/null +++ b/modules/airgap/outputs.tf @@ -0,0 +1,15 @@ +output "rke2_bastion_public_dns" { + 
value = aws_instance.rke2_bastion.public_dns +} + +output "rke2_server1_private_ip" { + value = aws_instance.rke2_server1.private_ip +} + +output "rke2_server2_private_ip" { + value = aws_instance.rke2_server2.private_ip +} + +output "rke2_server3_private_ip" { + value = aws_instance.rke2_server3.private_ip +} \ No newline at end of file diff --git a/tests/airgap/README.md b/tests/airgap/README.md new file mode 100644 index 00000000..a6f49be6 --- /dev/null +++ b/tests/airgap/README.md @@ -0,0 +1,110 @@ +# Airgap Provisioning Tests + +The tfp-automation airgap provisioning tests follow this workflow: + +1. Set up an airgapped Rancher HA environment using Terraform resources and the specified provider infrastructure. +2. Provision downstream RKE2 / K3S clusters. +3. Perform post-provisioning checks on the clusters. +4. Clean up resources (the Terraform cleanup method must be called explicitly so that each test doesn't run into caching issues). + +See below for more details on the config. Note that the config can be written in either JSON or YAML (all examples below are in YAML). + +## Table of Contents +1. [Getting Started](#Getting-Started) +2. [Local Qase Reporting](#Local-Qase-Reporting) + +## Getting Started +The config is split up into multiple parts. Think of the parts as follows: +- Standalone config for setting up Rancher +- Custom cluster config for provisioning downstream clusters +- Rancher config + +In no particular order, see an example below: + +```yaml +####################### +# RANCHER CONFIG +####################### +rancher: + host: "" # REQUIRED - fill out with the expected Rancher server URL + adminPassword: "" # REQUIRED - this is the same as the bootstrapPassword below, make sure they match + adminToken: "" # REQUIRED - leave this field empty as shown + insecure: true # REQUIRED - leave this as true +####################### +# TERRAFORM CONFIG +####################### +terraform: + cloudCredentialName: "" # REQUIRED - fill with desired value + defaultClusterRoleForProjectMembers: "true" # REQUIRED - leave value as true + enableNetworkPolicy: false # REQUIRED - values are true or false - can leave as false + hostnamePrefix: "" # REQUIRED - fill with desired value + machineConfigName: "" # REQUIRED - fill with desired value + module: "" # REQUIRED - leave this field empty as shown + networkPlugin: "" # REQUIRED - fill with desired value + nodeTemplateName: "" # REQUIRED - fill with desired value + privateKeyPath: "" # REQUIRED - specify the private key that will be used to access the created instances + privateRegistries: + authConfigSecretName: "" # REQUIRED - specify the name of the secret you want created + insecure: true + url: "" # REQUIRED - name of the private registry that is already created + systemDefaultRegistry: "" # REQUIRED - name of the private registry that is already created + username: "" # REQUIRED - username of the private registry + password: "" # REQUIRED - password of the private registry + ########################################## + # INFRASTRUCTURE / CUSTOM CLUSTER SETUP + ########################################## + awsCredentials: + awsAccessKey: "" + awsSecretKey: "" + awsConfig: + ami: "" + awsKeyName: "" + awsInstanceType: "" + awsSecurityGroupNames: [""] + awsSubnetID: "" + awsVpcID: "" + awsZoneLetter: "" + awsRootSize: 100 + awsRoute53Zone: "" + region: "" + awsUser: "" + sshConnectionType: "ssh" + timeout: "5m" + ################################### + # STANDALONE CONFIG - RANCHER SETUP + ################################### + 
standalone: + airgapInternalFQDN: "" # REQUIRED - same as the rancherHostname, but it must end with `-internal` + bootstrapPassword: "" # REQUIRED - this is the same as the adminPassword above, make sure they match + certManagerVersion: "" # REQUIRED - (e.g. v1.15.3) + rancherChartVersion: "" # REQUIRED - fill with desired value + rancherChartRepository: "" # REQUIRED - fill with desired value. Must end with a trailing / + rancherHostname: "" # REQUIRED - fill with desired value + rancherRepo: "" # REQUIRED - fill with desired value + rancherTagVersion: "" # REQUIRED - fill with desired value + rke2Group: "" # REQUIRED - fill with the group of the created instance + type: "" # REQUIRED - fill with desired value + rke2User: "" # REQUIRED - fill with the username of the created instance + stagingRancherImage: "" # OPTIONAL - fill out only if you are using a staging registry + stagingRancherAgentImage: "" # OPTIONAL - fill out only if you are using a staging registry + rke2Version: "" # REQUIRED - the format MUST be `v1.xx.x` (e.g. v1.31.3) +``` + +Before running, be sure to run the following commands: + +`export RANCHER2_KEY_PATH="///go/src/github.com/rancher/tfp-automation/modules/rancher2"; export AIRGAP_KEY_PATH="///go/src/github.com/rancher/tfp-automation/modules/airgap"; export RANCHER2_PROVIDER_VERSION=""; export CATTLE_TEST_CONFIG=; export LOCALS_PROVIDER_VERSION=""; export AWS_PROVIDER_VERSION=""` + +See the example below on how to run the tests: + +`gotestsum --format standard-verbose --packages=github.com/rancher/tfp-automation/tests/airgap --junitfile results.xml --jsonfile results.json -- -timeout=120m -v -run "TestTfpAirgapProvisioningTestSuite$"` + +If the specified test passes immediately without actually running, add the `-count=1` flag to the `gotestsum` command. This prevents cached results from a previous run from interfering with the new test run. + +## Local Qase Reporting +If you plan to report to Qase locally, you will need the following: +1. The `terratest` block in your config file must have `localQaseReporting: true`. +2. The working shell session must have the following two environment variables set: + - `QASE_AUTOMATION_TOKEN=""` + - `QASE_TEST_RUN_ID=""` +3. Append `./reporter` to the end of the `gotestsum` command. 
See an example below: + - `gotestsum --format standard-verbose --packages=github.com/rancher/tfp-automation/tests/airgap --junitfile results.xml --jsonfile results.json -- -timeout=120m -v -run "TestTfpAirgapProvisioningTestSuite$";/path/to/tfp-automation/reporter` \ No newline at end of file diff --git a/tests/airgap/airgap_provisioning_test.go b/tests/airgap/airgap_provisioning_test.go new file mode 100644 index 00000000..f83085c9 --- /dev/null +++ b/tests/airgap/airgap_provisioning_test.go @@ -0,0 +1,156 @@ +package airgap + +import ( + "testing" + + "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/rancher/shepherd/clients/rancher" + management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/token" + ranchFrame "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tfp-automation/config" + "github.com/rancher/tfp-automation/defaults/configs" + "github.com/rancher/tfp-automation/framework" + airgapCleanup "github.com/rancher/tfp-automation/framework/cleanup/airgap" + cleanup "github.com/rancher/tfp-automation/framework/cleanup/rancher2" + resources "github.com/rancher/tfp-automation/framework/set/resources/airgap" + qase "github.com/rancher/tfp-automation/pipeline/qase/results" + "github.com/rancher/tfp-automation/tests/extensions/provisioning" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type TfpAirgapProvisioningTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + rancherConfig *rancher.Config + terraformConfig *config.TerraformConfig + terratestConfig *config.TerratestConfig + standaloneTerraformOptions *terraform.Options + terraformOptions *terraform.Options + adminUser *management.User +} + +func (a *TfpAirgapProvisioningTestSuite) TearDownSuite() { + airgapCleanup.ConfigAirgapCleanup(a.T(), a.standaloneTerraformOptions) +} + +func (a *TfpAirgapProvisioningTestSuite) SetupSuite() { + a.terraformConfig = new(config.TerraformConfig) + ranchFrame.LoadConfig(config.TerraformConfigurationFileKey, a.terraformConfig) + + a.terratestConfig = new(config.TerratestConfig) + ranchFrame.LoadConfig(config.TerratestConfigurationFileKey, a.terratestConfig) + + standaloneTerraformOptions, keyPath := framework.AirgapSetup(a.T(), a.terraformConfig, a.terratestConfig) + a.standaloneTerraformOptions = standaloneTerraformOptions + + resources.CreateMainTF(a.T(), a.standaloneTerraformOptions, keyPath, a.terraformConfig, a.terratestConfig) +} + +func (a *TfpAirgapProvisioningTestSuite) TfpSetupSuite(terratestConfig *config.TerratestConfig, terraformConfig *config.TerraformConfig) { + testSession := session.NewSession() + a.session = testSession + + rancherConfig := new(rancher.Config) + ranchFrame.LoadConfig(configs.Rancher, rancherConfig) + + a.rancherConfig = rancherConfig + + adminUser := &management.User{ + Username: "admin", + Password: rancherConfig.AdminPassword, + } + + a.adminUser = adminUser + + userToken, err := token.GenerateUserToken(adminUser, a.rancherConfig.Host) + require.NoError(a.T(), err) + + client, err := rancher.NewClient(userToken.Token, testSession) + require.NoError(a.T(), err) + + a.client = client + + rancherConfig.AdminToken = userToken.Token + + terraformOptions := framework.Rancher2Setup(a.T(), a.rancherConfig, terraformConfig, 
terratestConfig) + a.terraformOptions = terraformOptions +} + +func (a *TfpAirgapProvisioningTestSuite) TestTfpAirgapProvisioning() { + tests := []struct { + name string + module string + }{ + {"RKE2", "airgap_rke2"}, + {"K3S", "airgap_k3s"}, + } + + for _, tt := range tests { + terratestConfig := *a.terratestConfig + terraformConfig := *a.terraformConfig + terraformConfig.Module = tt.module + + a.TfpSetupSuite(&terratestConfig, &terraformConfig) + + provisioning.GetK8sVersion(a.T(), a.client, &terratestConfig, &terraformConfig, configs.DefaultK8sVersion) + + tt.name = tt.name + " Kubernetes version: " + terratestConfig.KubernetesVersion + testUser, testPassword, clusterName, poolName := configs.CreateTestCredentials() + + a.Run((tt.name), func() { + defer cleanup.ConfigCleanup(a.T(), a.terraformOptions) + + provisioning.Provision(a.T(), a.client, a.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions, nil) + provisioning.VerifyCluster(a.T(), a.client, clusterName, &terraformConfig, &terratestConfig) + }) + } + + if a.terratestConfig.LocalQaseReporting { + qase.ReportTest() + } +} + +func (a *TfpAirgapProvisioningTestSuite) TestTfpAirgapUpgrading() { + tests := []struct { + name string + module string + }{ + {"RKE2", "airgap_rke2"}, + {"K3S", "airgap_k3s"}, + } + + for _, tt := range tests { + terratestConfig := *a.terratestConfig + terraformConfig := *a.terraformConfig + terraformConfig.Module = tt.module + + a.TfpSetupSuite(&terratestConfig, &terraformConfig) + + provisioning.GetK8sVersion(a.T(), a.client, &terratestConfig, &terraformConfig, configs.SecondHighestVersion) + + tt.name = tt.name + " Kubernetes version: " + terratestConfig.KubernetesVersion + testUser, testPassword, clusterName, poolName := configs.CreateTestCredentials() + + a.Run((tt.name), func() { + defer cleanup.ConfigCleanup(a.T(), a.terraformOptions) + + provisioning.Provision(a.T(), a.client, a.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions, nil) + provisioning.VerifyCluster(a.T(), a.client, clusterName, &terraformConfig, &terratestConfig) + + provisioning.KubernetesUpgrade(a.T(), a.client, a.rancherConfig, &terraformConfig, &terratestConfig, testUser, testPassword, clusterName, poolName, a.terraformOptions) + provisioning.VerifyCluster(a.T(), a.client, clusterName, &terraformConfig, &terratestConfig) + }) + } + + if a.terratestConfig.LocalQaseReporting { + qase.ReportTest() + } +} + +func TestTfpAirgapProvisioningTestSuite(t *testing.T) { + suite.Run(t, new(TfpAirgapProvisioningTestSuite)) +} diff --git a/tests/extensions/provisioning/provision.go b/tests/extensions/provisioning/provision.go index 83057e7a..dc1a4c0d 100644 --- a/tests/extensions/provisioning/provision.go +++ b/tests/extensions/provisioning/provision.go @@ -13,7 +13,6 @@ import ( // Provision is a function that will run terraform init and apply Terraform resources to provision a cluster. 
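+// For single-cluster runs, the module is validated against the supported module list before any Terraform resources are applied.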
func Provision(t *testing.T, client *rancher.Client, rancherConfig *rancher.Config, terraformConfig *config.TerraformConfig, terratestConfig *config.TerratestConfig, testUser, testPassword, clusterName, poolName string, terraformOptions *terraform.Options, configMap []map[string]any) { - if !terraformConfig.MultiCluster { isSupported := SupportedModules(terraformConfig, terraformOptions, nil) require.True(t, isSupported) diff --git a/tests/extensions/provisioning/supportedModules.go b/tests/extensions/provisioning/supportedModules.go index 63ad0350..25b8f141 100644 --- a/tests/extensions/provisioning/supportedModules.go +++ b/tests/extensions/provisioning/supportedModules.go @@ -43,6 +43,8 @@ func verifyModule(module string) bool { modules.CustomEC2RKE1, modules.CustomEC2RKE2, modules.CustomEC2K3s, + modules.AirgapRKE2, + modules.AirgapK3S, } for _, supportedModule := range supportedModules { diff --git a/tests/rke/rke_provider_test.go b/tests/rke/rke_provider_test.go index 0557b7f9..9beff75b 100644 --- a/tests/rke/rke_provider_test.go +++ b/tests/rke/rke_provider_test.go @@ -36,10 +36,10 @@ func (t *RKEProviderTestSuite) TestCreateRKECluster() { t.terratestConfig = new(config.TerratestConfig) ranchFrame.LoadConfig(config.TerratestConfigurationFileKey, t.terratestConfig) - terraformOptions := framework.RKESetup(t.T(), t.terraformConfig, t.terratestConfig) + terraformOptions, keyPath := framework.RKESetup(t.T(), t.terraformConfig, t.terratestConfig) t.terraformOptions = terraformOptions - rke.CreateRKEMainTF(t.T(), t.terraformOptions, t.terraformConfig) + rke.CreateRKEMainTF(t.T(), t.terraformOptions, keyPath, t.terraformConfig, t.terratestConfig) if t.terratestConfig.LocalQaseReporting { qase.ReportTest() diff --git a/tests/sanity/tfp_automation_sanity_test.go b/tests/sanity/tfp_automation_sanity_test.go index e6429d24..b3b9cf7c 100644 --- a/tests/sanity/tfp_automation_sanity_test.go +++ b/tests/sanity/tfp_automation_sanity_test.go @@ -44,10 +44,10 @@ func (t *TfpSanityTestSuite) SetupSuite() { t.terratestConfig = new(config.TerratestConfig) ranchFrame.LoadConfig(config.TerratestConfigurationFileKey, t.terratestConfig) - standaloneTerraformOptions := framework.SanitySetup(t.T(), t.terraformConfig, t.terratestConfig) + standaloneTerraformOptions, keyPath := framework.SanitySetup(t.T(), t.terraformConfig, t.terratestConfig) t.standaloneTerraformOptions = standaloneTerraformOptions - resources.CreateMainTF(t.T(), t.standaloneTerraformOptions, t.terraformConfig) + resources.CreateMainTF(t.T(), t.standaloneTerraformOptions, keyPath, t.terraformConfig, t.terratestConfig) } func (t *TfpSanityTestSuite) TfpSetupSuite(terratestConfig *config.TerratestConfig, terraformConfig *config.TerraformConfig) {