diff --git a/.mockery.yaml b/.mockery.yaml
index d9e1b07099..89ece3fdd4 100644
--- a/.mockery.yaml
+++ b/.mockery.yaml
@@ -36,3 +36,15 @@ packages:
       config:
         dir: "{{.InterfaceDir}}/mocks"
         outpkg: mocks
+
+  github.com/weaveworks/eksctl/pkg/cfn/manager:
+    interfaces:
+      NodeGroupStackManager:
+        config:
+          dir: "{{.InterfaceDir}}/mocks"
+          outpkg: mocks
+
+      NodeGroupResourceSet:
+        config:
+          dir: "{{.InterfaceDir}}/mocks"
+          outpkg: mocks
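Note: these entries make mockery generate testify mocks for the two new interfaces into pkg/cfn/manager/mocks; the generated files appear further down in this diff. A minimal sketch of consuming such a mock, assuming the standard testify pattern (the stack name is made up; the generated constructor registers a cleanup that asserts expectations):

    package manager_test

    import (
        "context"
        "testing"

        "github.com/stretchr/testify/mock"

        "github.com/weaveworks/eksctl/pkg/cfn/manager/mocks"
    )

    func TestStackManagerMock(t *testing.T) {
        stackManager := mocks.NewNodeGroupStackManager(t)
        stackManager.On("CreateStack",
            mock.Anything, mock.Anything, mock.Anything,
            mock.Anything, mock.Anything, mock.Anything,
        ).Return(nil)

        // Normally the mock is handed to the code under test; calling it
        // directly here just satisfies the expectation.
        if err := stackManager.CreateStack(context.Background(),
            "eksctl-demo-nodegroup-ng-1", nil, nil, nil, nil); err != nil {
            t.Fatal(err)
        }
    }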
diff --git a/integration/tests/accessentries/accessentries_test.go b/integration/tests/accessentries/accessentries_test.go
index 1e48dba4cc..6256f8f07f 100644
--- a/integration/tests/accessentries/accessentries_test.go
+++ b/integration/tests/accessentries/accessentries_test.go
@@ -6,27 +6,34 @@ package accessentries
 import (
 	"bytes"
 	"context"
+	_ "embed"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"strings"
 	"testing"
+	"time"
+
+	"github.com/kubicorn/kubicorn/pkg/namer"

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"

-	"github.com/weaveworks/eksctl/integration/runner"
-	. "github.com/weaveworks/eksctl/integration/runner"
-
 	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
+	cfntypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types"
 	awseks "github.com/aws/aws-sdk-go-v2/service/eks"
 	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
 	"github.com/aws/aws-sdk-go-v2/service/iam"
 	iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types"

+	"github.com/weaveworks/eksctl/integration/runner"
+	. "github.com/weaveworks/eksctl/integration/runner"
 	"github.com/weaveworks/eksctl/integration/tests"
+	clusterutils "github.com/weaveworks/eksctl/integration/utilities/cluster"
 	"github.com/weaveworks/eksctl/pkg/accessentry"
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
+	"github.com/weaveworks/eksctl/pkg/cfn/outputs"
 	"github.com/weaveworks/eksctl/pkg/eks"
 	"github.com/weaveworks/eksctl/pkg/testutils"
 )
@@ -54,6 +61,9 @@ var (
 	apiDisabledCluster = "accessentries-api-disabled"
 )

+//go:embed testdata/node-role.yaml
+var nodeRoleJSON []byte
+
 func init() {
 	// Call testing.Init() prior to tests.NewParams(), as otherwise -test.* will not be recognised.
 	// See also: https://golang.org/doc/go1.13#testing
 	testing.Init()
@@ -69,7 +79,6 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 		err              error
 		alreadyExistsErr *iamtypes.EntityAlreadyExistsException
 	)
-
 	maybeCreateRoleAndGetARN := func(name string) (string, error) {
 		createOut, err := ctl.AWSProvider.IAM().CreateRole(context.Background(), &iam.CreateRoleInput{
 			RoleName: aws.String(name),
@@ -89,7 +98,6 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 		}
 		return *getOut.Role.Arn, nil
 	}
-
 	ctl, err = eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, nil)
 	Expect(err).NotTo(HaveOccurred())

@@ -403,6 +411,96 @@ var _ = Describe("(Integration) [AccessEntries Test]", func() {
 			}, "5m", "30s").ShouldNot(RunSuccessfully())
 		})
 	})
+
+	Context("self-managed nodegroup with a pre-existing instanceRoleARN", func() {
+		waitAndGetRoleARN := func(ctx context.Context, stackName *string) (string, error) {
+			waiter := cloudformation.NewStackCreateCompleteWaiter(ctl.AWSProvider.CloudFormation())
+			output, err := waiter.WaitForOutput(ctx, &cloudformation.DescribeStacksInput{
+				StackName: stackName,
+			}, 5*time.Minute)
+			if err != nil {
+				return "", err
+			}
+			if len(output.Stacks) != 1 {
+				return "", fmt.Errorf("expected to find 1 stack; got %d", len(output.Stacks))
+			}
+			var roleARN string
+			if err := outputs.Collect(output.Stacks[0], map[string]outputs.Collector{
+				"NodeInstanceRoleARN": func(v string) error {
+					roleARN = v
+					return nil
+				},
+			}, nil); err != nil {
+				return "", err
+			}
+			return roleARN, nil
+		}
+
+		var roleARN string
+
+		BeforeAll(func() {
+			By("creating a CloudFormation stack for NodeInstanceRoleARN")
+			stackName := aws.String(fmt.Sprintf("%s-%s-role", apiEnabledCluster, namer.RandomName()))
+			ctx := context.Background()
+			_, err := ctl.AWSProvider.CloudFormation().CreateStack(ctx, &cloudformation.CreateStackInput{
+				StackName:    stackName,
+				TemplateBody: aws.String(string(nodeRoleJSON)),
+				Capabilities: []cfntypes.Capability{cfntypes.CapabilityCapabilityIam},
+			})
+			Expect(err).NotTo(HaveOccurred())
+			DeferCleanup(func() {
+				_, err := ctl.AWSProvider.CloudFormation().DeleteStack(ctx, &cloudformation.DeleteStackInput{
+					StackName: stackName,
+				})
+				Expect(err).NotTo(HaveOccurred())
+			})
+			By("waiting for the stack to be created successfully")
+			roleARN, err = waitAndGetRoleARN(ctx, stackName)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		It("should create an access entry but not delete it", func() {
+			clusterConfig := makeClusterConfig(apiEnabledCluster)
+			ng := api.NewNodeGroup()
+			ng.Name = "with-instance-role-arn"
+			ng.IAM.InstanceRoleARN = roleARN
+			ng.ScalingConfig = &api.ScalingConfig{
+				DesiredCapacity: aws.Int(1),
+			}
+			clusterConfig.NodeGroups = []*api.NodeGroup{ng}
+
+			cmd := params.EksctlCreateNodegroupCmd.
+				WithArgs("--config-file", "-").
+				WithoutArg("--region", params.Region).
+				WithStdin(clusterutils.Reader(clusterConfig))
+			Expect(cmd).To(RunSuccessfullyWithOutputString(ContainSubstring(
+				fmt.Sprintf("nodegroup %s: created access entry for principal ARN %q", ng.Name, roleARN),
+			)))
+
+			assertAccessEntryExists := func() {
+				_, err = ctl.AWSProvider.EKS().DescribeAccessEntry(context.Background(), &awseks.DescribeAccessEntryInput{
+					ClusterName:  aws.String(apiEnabledCluster),
+					PrincipalArn: aws.String(roleARN),
+				})
+				ExpectWithOffset(1, err).NotTo(HaveOccurred())
+			}
+			By("asserting an access entry was created for the nodegroup")
+			assertAccessEntryExists()
+
+			By("deleting nodegroup with a pre-existing instance role ARN")
+			cmd = params.EksctlDeleteCmd.
+				WithArgs(
+					"nodegroup",
+					"--config-file", "-",
+					"--wait",
+				).
+				WithoutArg("--region", params.Region).
+				WithStdin(clusterutils.Reader(clusterConfig))
+			Expect(cmd).To(RunSuccessfully())
+
+			By("asserting the associated access entry is not deleted")
+			assertAccessEntryExists()
+		})
+	})
 })
})

@@ -423,6 +521,7 @@ var _ = SynchronizedAfterSuite(func() {}, func() {
 		WithArgs(
 			"cluster",
 			"--name", apiEnabledCluster,
+			"--disable-nodegroup-eviction",
 			"--wait",
 		)).To(RunSuccessfully())
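Note: the test deliberately asserts that the access entry survives nodegroup deletion — when the instance role is supplied by the user, eksctl treats the matching access entry as user-owned too. A hedged sketch (not part of this PR) of cleaning such an entry up afterwards with aws-sdk-go-v2; client construction is omitted, and the cluster name and roleARN are the ones from the test above:

    // Remove the access entry that eksctl intentionally leaves behind.
    _, err := eksClient.DeleteAccessEntry(ctx, &awseks.DeleteAccessEntryInput{
        ClusterName:  aws.String("accessentries-api-enabled"),
        PrincipalArn: aws.String(roleARN),
    })
    if err != nil {
        return fmt.Errorf("deleting access entry: %w", err)
    }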
diff --git a/integration/tests/accessentries/testdata/node-role.yaml b/integration/tests/accessentries/testdata/node-role.yaml
new file mode 100644
index 0000000000..cc46f70adb
--- /dev/null
+++ b/integration/tests/accessentries/testdata/node-role.yaml
@@ -0,0 +1,55 @@
+{
+  "AWSTemplateFormatVersion": "2010-09-09",
+  "Description": "Node role for access entry test",
+  "Resources": {
+    "NodeInstanceRole": {
+      "Type": "AWS::IAM::Role",
+      "Properties": {
+        "AssumeRolePolicyDocument": {
+          "Statement": [
+            {
+              "Action": [
+                "sts:AssumeRole"
+              ],
+              "Effect": "Allow",
+              "Principal": {
+                "Service": "ec2.amazonaws.com"
+              }
+            }
+          ],
+          "Version": "2012-10-17"
+        },
+        "ManagedPolicyArns": [
+          {
+            "Fn::Sub": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+          },
+          {
+            "Fn::Sub": "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+          },
+          {
+            "Fn::Sub": "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+          },
+          {
+            "Fn::Sub": "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+          }
+        ],
+        "Path": "/",
+        "Tags": [
+          {
+            "Key": "Name",
+            "Value": {
+              "Fn::Sub": "${AWS::StackName}/NodeInstanceRole"
+            }
+          }
+        ]
+      }
+    }
+  },
+  "Outputs": {
+    "NodeInstanceRoleARN": {
+      "Value": {
+        "Fn::GetAtt": "NodeInstanceRole.Arn"
+      }
+    }
+  }
+}
diff --git a/pkg/actions/nodegroup/create.go b/pkg/actions/nodegroup/create.go
index 136c27f628..ba7ffda68a 100644
--- a/pkg/actions/nodegroup/create.go
+++ b/pkg/actions/nodegroup/create.go
@@ -254,8 +254,8 @@ func (m *Manager) nodeCreationTasks(ctx context.Context, isOwnedCluster, skipEgr
 		Parallel: true,
 	}
 	disableAccessEntryCreation := !m.accessEntry.IsEnabled() || updateAuthConfigMap != nil
-	nodeGroupTasks := m.stackManager.NewUnmanagedNodeGroupTask(ctx, cfg.NodeGroups, !awsNodeUsesIRSA, skipEgressRules, disableAccessEntryCreation, vpcImporter)
-	if nodeGroupTasks.Len() > 0 {
+	if nodeGroupTasks := m.stackManager.NewUnmanagedNodeGroupTask(ctx, cfg.NodeGroups, !awsNodeUsesIRSA, skipEgressRules,
+		disableAccessEntryCreation, vpcImporter); nodeGroupTasks.Len() > 0 {
 		allNodeGroupTasks.Append(nodeGroupTasks)
 	}
 	managedTasks := m.stackManager.NewManagedNodeGroupTask(ctx, cfg.ManagedNodeGroups, !awsNodeUsesIRSA, vpcImporter)
diff --git a/pkg/actions/nodegroup/nodegroup.go b/pkg/actions/nodegroup/nodegroup.go
index f4ce30e85a..51433c8b0a 100644
--- a/pkg/actions/nodegroup/nodegroup.go
+++ b/pkg/actions/nodegroup/nodegroup.go
@@ -3,11 +3,10 @@ package nodegroup
 import (
 	"time"

-	"github.com/weaveworks/eksctl/pkg/accessentry"
-
 	"github.com/aws/aws-sdk-go/aws/request"
 	"k8s.io/client-go/kubernetes"

+	"github.com/weaveworks/eksctl/pkg/accessentry"
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/cfn/builder"
 	"github.com/weaveworks/eksctl/pkg/cfn/manager"
diff --git a/pkg/apis/eksctl.io/v1alpha5/access_entry.go b/pkg/apis/eksctl.io/v1alpha5/access_entry.go
index eca373e8de..c837f39feb 100644
--- a/pkg/apis/eksctl.io/v1alpha5/access_entry.go
+++ b/pkg/apis/eksctl.io/v1alpha5/access_entry.go
@@ -42,6 +42,28 @@ type AccessScope struct {
 	Namespaces []string `json:"namespaces,omitempty"`
 }

+// AccessEntryType represents the type of access entry.
+type AccessEntryType string
+
+const (
+	// AccessEntryTypeLinux specifies the EC2 Linux access entry type.
+	AccessEntryTypeLinux AccessEntryType = "EC2_LINUX"
+	// AccessEntryTypeWindows specifies the Windows access entry type.
+	AccessEntryTypeWindows AccessEntryType = "EC2_WINDOWS"
+	// AccessEntryTypeFargateLinux specifies the Fargate Linux access entry type.
+	AccessEntryTypeFargateLinux AccessEntryType = "FARGATE_LINUX"
+	// AccessEntryTypeStandard specifies a standard access entry type.
+	AccessEntryTypeStandard AccessEntryType = "STANDARD"
+)
+
+// GetAccessEntryType returns the access entry type for the AMI family of the specified nodegroup.
+func GetAccessEntryType(ng *NodeGroup) AccessEntryType {
+	if IsWindowsImage(ng.GetAMIFamily()) {
+		return AccessEntryTypeWindows
+	}
+	return AccessEntryTypeLinux
+}
+
 type ARN arn.ARN

 // ARN provides custom unmarshalling for an AWS ARN.
@@ -103,9 +125,9 @@ func validateAccessEntries(accessEntries []AccessEntry) error {
 			return fmt.Errorf("%s.principalARN must be set to a valid AWS ARN", path)
 		}

-		switch ae.Type {
-		case "", "STANDARD":
-		case "EC2_LINUX", "EC2_WINDOWS", "FARGATE_LINUX":
+		switch AccessEntryType(ae.Type) {
+		case "", AccessEntryTypeStandard:
+		case AccessEntryTypeLinux, AccessEntryTypeWindows, AccessEntryTypeFargateLinux:
 			if len(ae.KubernetesGroups) > 0 || ae.KubernetesUsername != "" {
 				return fmt.Errorf("cannot specify %s.kubernetesGroups nor %s.kubernetesUsername when type is set to %s", path, path, ae.Type)
 			}
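Note: a quick illustration of the new helper. The nodegroup is hypothetical, and the AMI-family constants are assumed to be the ones this package already defines:

    ng := api.NewNodeGroup()
    ng.AMIFamily = api.NodeImageFamilyWindowsServer2022CoreContainer
    fmt.Println(api.GetAccessEntryType(ng)) // EC2_WINDOWS

    ng.AMIFamily = api.NodeImageFamilyAmazonLinux2
    fmt.Println(api.GetAccessEntryType(ng)) // EC2_LINUX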
diff --git a/pkg/cfn/builder/iam.go b/pkg/cfn/builder/iam.go
index 422a03d323..422802c064 100644
--- a/pkg/cfn/builder/iam.go
+++ b/pkg/cfn/builder/iam.go
@@ -124,21 +124,22 @@ func (n *NodeGroupResourceSet) WithNamedIAM() bool {
 }

 func (n *NodeGroupResourceSet) addResourcesForIAM(ctx context.Context) error {
-	if n.spec.IAM.InstanceProfileARN != "" {
+	nodeGroupIAM := n.options.NodeGroup.IAM
+	if nodeGroupIAM.InstanceProfileARN != "" {
 		n.rs.withIAM = false
 		n.rs.withNamedIAM = false

 		// if instance profile is given, as well as the role, we simply use both and export the role
 		// (which is needed in order to authorise the nodegroup)
-		n.instanceProfileARN = gfnt.NewString(n.spec.IAM.InstanceProfileARN)
-		if n.spec.IAM.InstanceRoleARN != "" {
-			n.rs.defineOutputWithoutCollector(outputs.NodeGroupInstanceProfileARN, n.spec.IAM.InstanceProfileARN, true)
-			n.rs.defineOutputWithoutCollector(outputs.NodeGroupInstanceRoleARN, n.spec.IAM.InstanceRoleARN, true)
+		n.instanceProfileARN = gfnt.NewString(nodeGroupIAM.InstanceProfileARN)
+		if nodeGroupIAM.InstanceRoleARN != "" {
+			n.rs.defineOutputWithoutCollector(outputs.NodeGroupInstanceProfileARN, nodeGroupIAM.InstanceProfileARN, true)
+			n.rs.defineOutputWithoutCollector(outputs.NodeGroupInstanceRoleARN, nodeGroupIAM.InstanceRoleARN, true)
 			return nil
 		}

 		// if instance role is not given, export profile and use the getter to call importer function
-		n.rs.defineOutput(outputs.NodeGroupInstanceProfileARN, n.spec.IAM.InstanceProfileARN, true, func(v string) error {
-			return iam.ImportInstanceRoleFromProfileARN(ctx, n.iamAPI, n.spec, v)
+		n.rs.defineOutput(outputs.NodeGroupInstanceProfileARN, nodeGroupIAM.InstanceProfileARN, true, func(v string) error {
+			return iam.ImportInstanceRoleFromProfileARN(ctx, n.iamAPI, n.options.NodeGroup, v)
 		})

 		return nil
@@ -146,8 +147,8 @@ func (n *NodeGroupResourceSet) addResourcesForIAM(ctx context.Context) error {

 	n.rs.withIAM = true

-	if n.spec.IAM.InstanceRoleARN != "" {
-		roleARN := NormalizeARN(n.spec.IAM.InstanceRoleARN)
+	if nodeGroupIAM.InstanceRoleARN != "" {
+		roleARN := NormalizeARN(nodeGroupIAM.InstanceRoleARN)

 		// if role is set, but profile isn't - create profile
 		n.newResource(cfnIAMInstanceProfileName, &gfniam.InstanceProfile{
@@ -156,7 +157,7 @@ func (n *NodeGroupResourceSet) addResourcesForIAM(ctx context.Context) error {
 		})
 		n.instanceProfileARN = gfnt.MakeFnGetAttString(cfnIAMInstanceProfileName, "Arn")
 		n.rs.defineOutputFromAtt(outputs.NodeGroupInstanceProfileARN, cfnIAMInstanceProfileName, "Arn", true, func(v string) error {
-			n.spec.IAM.InstanceProfileARN = v
+			nodeGroupIAM.InstanceProfileARN = v
 			return nil
 		})
 		n.rs.defineOutputWithoutCollector(outputs.NodeGroupInstanceRoleARN, roleARN, true)
@@ -165,12 +166,12 @@ func (n *NodeGroupResourceSet) addResourcesForIAM(ctx context.Context) error {

 	// if neither role nor profile is given - create both

-	if n.spec.IAM.InstanceRoleName != "" {
+	if nodeGroupIAM.InstanceRoleName != "" {
 		// setting role name requires additional capabilities
 		n.rs.withNamedIAM = true
 	}

-	if err := createRole(n.rs, n.clusterSpec.IAM, n.spec.IAM, false, n.forceAddCNIPolicy); err != nil {
+	if err := createRole(n.rs, n.options.ClusterConfig.IAM, nodeGroupIAM, false, n.options.ForceAddCNIPolicy); err != nil {
 		return err
 	}

@@ -181,11 +182,11 @@ func (n *NodeGroupResourceSet) addResourcesForIAM(ctx context.Context) error {

 	n.instanceProfileARN = gfnt.MakeFnGetAttString(cfnIAMInstanceProfileName, "Arn")
 	n.rs.defineOutputFromAtt(outputs.NodeGroupInstanceProfileARN, cfnIAMInstanceProfileName, "Arn", true, func(v string) error {
-		n.spec.IAM.InstanceProfileARN = v
+		nodeGroupIAM.InstanceProfileARN = v
 		return nil
 	})
 	n.rs.defineOutputFromAtt(outputs.NodeGroupInstanceRoleARN, cfnIAMInstanceRoleName, "Arn", true, func(v string) error {
-		n.spec.IAM.InstanceRoleARN = v
+		nodeGroupIAM.InstanceRoleARN = v
 		return nil
 	})
 	return nil
diff --git a/pkg/cfn/builder/nodegroup.go b/pkg/cfn/builder/nodegroup.go
index e9bbc63759..2ab774dee8 100644
--- a/pkg/cfn/builder/nodegroup.go
+++ b/pkg/cfn/builder/nodegroup.go
@@ -38,113 +38,104 @@ const (

 // NodeGroupOptions represents options passed to a NodeGroupResourceSet.
 type NodeGroupOptions struct {
-	ClusterConfig              *api.ClusterConfig
-	NodeGroup                  *api.NodeGroup
-	Bootstrapper               nodebootstrap.Bootstrapper
-	ForceAddCNIPolicy          bool
-	VPCImporter                vpc.Importer
-	SkipEgressRules            bool
-	DisableAccessEntryCreation bool
+	ClusterConfig      *api.ClusterConfig
+	NodeGroup          *api.NodeGroup
+	Bootstrapper       nodebootstrap.Bootstrapper
+	ForceAddCNIPolicy  bool
+	VPCImporter        vpc.Importer
+	SkipEgressRules    bool
+	DisableAccessEntry bool
+	// DisableAccessEntryResource disables creation of an access entry resource but still attaches the UsesAccessEntry tag.
+	DisableAccessEntryResource bool
 }

 // NodeGroupResourceSet stores the resource information of the nodegroup
 type NodeGroupResourceSet struct {
-	rs                         *resourceSet
-	clusterSpec                *api.ClusterConfig
-	spec                       *api.NodeGroup
-	forceAddCNIPolicy          bool
-	disableAccessEntryCreation bool
-	ec2API                     awsapi.EC2
-
-	iamAPI awsapi.IAM
+	rs      *resourceSet
+	iamAPI  awsapi.IAM
+	ec2API  awsapi.EC2
+	options NodeGroupOptions
+
 	instanceProfileARN *gfnt.Value
 	securityGroups     []*gfnt.Value
 	vpc                *gfnt.Value
-	vpcImporter        vpc.Importer
-	bootstrapper       nodebootstrap.Bootstrapper
-	skipEgressRules    bool
 }

-// NewNodeGroupResourceSet returns a resource set for a nodegroup embedded in a cluster config
+// NewNodeGroupResourceSet returns a resource set for a nodegroup embedded in a cluster config.
 func NewNodeGroupResourceSet(ec2API awsapi.EC2, iamAPI awsapi.IAM, options NodeGroupOptions) *NodeGroupResourceSet {
 	return &NodeGroupResourceSet{
-		rs:                         newResourceSet(),
-		forceAddCNIPolicy:          options.ForceAddCNIPolicy,
-		clusterSpec:                options.ClusterConfig,
-		spec:                       options.NodeGroup,
-		ec2API:                     ec2API,
-		iamAPI:                     iamAPI,
-		vpcImporter:                options.VPCImporter,
-		bootstrapper:               options.Bootstrapper,
-		skipEgressRules:            options.SkipEgressRules,
-		disableAccessEntryCreation: options.DisableAccessEntryCreation,
+		rs:      newResourceSet(),
+		ec2API:  ec2API,
+		iamAPI:  iamAPI,
+		options: options,
 	}
 }

 // AddAllResources adds all the information about the nodegroup to the resource set
 func (n *NodeGroupResourceSet) AddAllResources(ctx context.Context) error {
-	if n.clusterSpec.IPv6Enabled() {
+	if n.options.ClusterConfig.IPv6Enabled() {
 		return errors.New("unmanaged nodegroups are not supported with IPv6 clusters")
 	}

+	ng := n.options.NodeGroup
 	n.rs.template.Description = fmt.Sprintf(
 		"%s (AMI family: %s, SSH access: %v, private networking: %v) %s",
 		nodeGroupTemplateDescription,
-		n.spec.AMIFamily, api.IsEnabled(n.spec.SSH.Allow), n.spec.PrivateNetworking,
+		ng.AMIFamily, api.IsEnabled(ng.SSH.Allow), ng.PrivateNetworking,
 		templateDescriptionSuffix)

 	n.Template().Mappings[servicePrincipalPartitionMapName] = api.Partitions.ServicePrincipalPartitionMappings()

-	n.rs.defineOutputWithoutCollector(outputs.NodeGroupFeaturePrivateNetworking, n.spec.PrivateNetworking, false)
-	n.rs.defineOutputWithoutCollector(outputs.NodeGroupFeatureSharedSecurityGroup, n.spec.SecurityGroups.WithShared, false)
-	n.rs.defineOutputWithoutCollector(outputs.NodeGroupFeatureLocalSecurityGroup, n.spec.SecurityGroups.WithLocal, false)
+	n.rs.defineOutputWithoutCollector(outputs.NodeGroupFeaturePrivateNetworking, ng.PrivateNetworking, false)
+	n.rs.defineOutputWithoutCollector(outputs.NodeGroupFeatureSharedSecurityGroup, ng.SecurityGroups.WithShared, false)
+	n.rs.defineOutputWithoutCollector(outputs.NodeGroupFeatureLocalSecurityGroup, ng.SecurityGroups.WithLocal, false)

-	n.vpc = n.vpcImporter.VPC()
+	n.vpc = n.options.VPCImporter.VPC()

-	if n.spec.Tags == nil {
-		n.spec.Tags = map[string]string{}
+	if ng.Tags == nil {
+		ng.Tags = map[string]string{}
 	}
-	for k, v := range n.clusterSpec.Metadata.Tags {
-		if _, exists := n.spec.Tags[k]; !exists {
-			n.spec.Tags[k] = v
+	for k, v := range n.options.ClusterConfig.Metadata.Tags {
+		if _, exists := ng.Tags[k]; !exists {
+			ng.Tags[k] = v
 		}
 	}

 	// Ensure MinSize is set, as it is required by the ASG cfn resource
 	// TODO this validation and default setting should happen way earlier than this
-	if n.spec.MinSize == nil {
-		if n.spec.DesiredCapacity == nil {
+	if ng.MinSize == nil {
+		if ng.DesiredCapacity == nil {
 			defaultNodeCount := api.DefaultNodeCount
-			n.spec.MinSize = &defaultNodeCount
+			ng.MinSize = &defaultNodeCount
 		} else {
-			n.spec.MinSize = n.spec.DesiredCapacity
+			ng.MinSize = ng.DesiredCapacity
 		}
-		logger.Info("--nodes-min=%d was set automatically for nodegroup %s", *n.spec.MinSize, n.spec.Name)
-	} else if n.spec.DesiredCapacity != nil && *n.spec.DesiredCapacity < *n.spec.MinSize {
-		return fmt.Errorf("--nodes value (%d) cannot be lower than --nodes-min value (%d)", *n.spec.DesiredCapacity, *n.spec.MinSize)
+		logger.Info("--nodes-min=%d was set automatically for nodegroup %s", *ng.MinSize, ng.Name)
+	} else if ng.DesiredCapacity != nil && *ng.DesiredCapacity < *ng.MinSize {
+		return fmt.Errorf("--nodes value (%d) cannot be lower than --nodes-min value (%d)", *ng.DesiredCapacity, *ng.MinSize)
 	}

 	// Ensure MaxSize is set, as it is required by the ASG cfn resource
-	if n.spec.MaxSize == nil {
-		if n.spec.DesiredCapacity == nil {
-			n.spec.MaxSize = n.spec.MinSize
+	if ng.MaxSize == nil {
+		if ng.DesiredCapacity == nil {
+			ng.MaxSize = ng.MinSize
 		} else {
-			n.spec.MaxSize = n.spec.DesiredCapacity
+			ng.MaxSize = ng.DesiredCapacity
 		}
-		logger.Info("--nodes-max=%d was set automatically for nodegroup %s", *n.spec.MaxSize, n.spec.Name)
-	} else if n.spec.DesiredCapacity != nil && *n.spec.DesiredCapacity > *n.spec.MaxSize {
-		return fmt.Errorf("--nodes value (%d) cannot be greater than --nodes-max value (%d)", *n.spec.DesiredCapacity, *n.spec.MaxSize)
-	} else if *n.spec.MaxSize < *n.spec.MinSize {
-		return fmt.Errorf("--nodes-min value (%d) cannot be greater than --nodes-max value (%d)", *n.spec.MinSize, *n.spec.MaxSize)
+		logger.Info("--nodes-max=%d was set automatically for nodegroup %s", *ng.MaxSize, ng.Name)
+	} else if ng.DesiredCapacity != nil && *ng.DesiredCapacity > *ng.MaxSize {
+		return fmt.Errorf("--nodes value (%d) cannot be greater than --nodes-max value (%d)", *ng.DesiredCapacity, *ng.MaxSize)
+	} else if *ng.MaxSize < *ng.MinSize {
+		return fmt.Errorf("--nodes-min value (%d) cannot be greater than --nodes-max value (%d)", *ng.MinSize, *ng.MaxSize)
 	}

 	if err := n.addResourcesForIAM(ctx); err != nil {
 		return err
 	}
 	n.addResourcesForSecurityGroups()

-	if !n.disableAccessEntryCreation {
+	if !n.options.DisableAccessEntry {
 		n.addAccessEntry()
 	}

@@ -180,63 +171,54 @@ var ControlPlaneNodeGroupEgressRules = []PartialEgressRule{

 var ControlPlaneEgressRuleDescriptionPrefix = "Allow control plane to communicate with "

 func (n *NodeGroupResourceSet) addAccessEntry() {
-	// TODO: SDK types.
-	var amiType string
-	if api.IsWindowsImage(n.spec.AMIFamily) {
-		amiType = "EC2_WINDOWS"
-	} else {
-		amiType = "EC2_LINUX"
+	n.rs.defineOutputWithoutCollector(outputs.NodeGroupUsesAccessEntry, true, false)
+	if n.options.DisableAccessEntryResource {
+		return
 	}

-	var principalARN *gfnt.Value
-	if roleARN := n.spec.IAM.InstanceRoleARN; roleARN != "" {
-		principalARN = gfnt.NewString(roleARN)
-	} else {
-		principalARN = gfnt.MakeFnGetAttString(cfnIAMInstanceRoleName, "Arn")
-	}
 	n.newResource("AccessEntry", &gfneks.AccessEntry{
-		PrincipalArn: principalARN,
-		ClusterName:  gfnt.NewString(n.clusterSpec.Metadata.Name),
-		Type:         gfnt.NewString(amiType),
+		PrincipalArn: gfnt.MakeFnGetAttString(cfnIAMInstanceRoleName, "Arn"),
+		ClusterName:  gfnt.NewString(n.options.ClusterConfig.Metadata.Name),
+		Type:         gfnt.NewString(string(api.GetAccessEntryType(n.options.NodeGroup))),
 	})
-	n.rs.defineOutputWithoutCollector(outputs.NodeGroupUsesAccessEntry, true, false)
 }

 func (n *NodeGroupResourceSet) addResourcesForSecurityGroups() {
-	for _, id := range n.spec.SecurityGroups.AttachIDs {
+	ng := n.options.NodeGroup
+	for _, id := range ng.SecurityGroups.AttachIDs {
 		n.securityGroups = append(n.securityGroups, gfnt.NewString(id))
 	}

-	if api.IsEnabled(n.spec.SecurityGroups.WithShared) {
-		n.securityGroups = append(n.securityGroups, n.vpcImporter.SharedNodeSecurityGroup())
+	if api.IsEnabled(ng.SecurityGroups.WithShared) {
+		n.securityGroups = append(n.securityGroups, n.options.VPCImporter.SharedNodeSecurityGroup())
 	}

-	if api.IsDisabled(n.spec.SecurityGroups.WithLocal) {
+	if api.IsDisabled(ng.SecurityGroups.WithLocal) {
 		return
 	}

-	desc := "worker nodes in group " + n.spec.Name
-	vpcID := n.vpcImporter.VPC()
-	refControlPlaneSG := n.vpcImporter.ControlPlaneSecurityGroup()
+	desc := "worker nodes in group " + ng.Name
+	vpcID := n.options.VPCImporter.VPC()
+	refControlPlaneSG := n.options.VPCImporter.ControlPlaneSecurityGroup()

 	refNodeGroupLocalSG := n.newResource("SG", &gfnec2.SecurityGroup{
 		VpcId:            vpcID,
 		GroupDescription: gfnt.NewString("Communication between the control plane and " + desc),
 		Tags: []gfncfn.Tag{{
-			Key:   gfnt.NewString("kubernetes.io/cluster/" + n.clusterSpec.Metadata.Name),
+			Key:   gfnt.NewString("kubernetes.io/cluster/" + n.options.ClusterConfig.Metadata.Name),
 			Value: gfnt.NewString("owned"),
 		}},
-		SecurityGroupIngress: makeNodeIngressRules(n.spec.NodeGroupBase, refControlPlaneSG, n.clusterSpec.VPC.CIDR.String(), desc),
+		SecurityGroupIngress: makeNodeIngressRules(ng.NodeGroupBase, refControlPlaneSG, n.options.ClusterConfig.VPC.CIDR.String(), desc),
 	})

 	n.securityGroups = append(n.securityGroups, refNodeGroupLocalSG)

-	if api.IsEnabled(n.spec.EFAEnabled) {
-		efaSG := n.rs.addEFASecurityGroup(vpcID, n.clusterSpec.Metadata.Name, desc)
+	if api.IsEnabled(ng.EFAEnabled) {
+		efaSG := n.rs.addEFASecurityGroup(vpcID, n.options.ClusterConfig.Metadata.Name, desc)
 		n.securityGroups = append(n.securityGroups, efaSG)
 	}

-	if !n.skipEgressRules {
+	if !n.options.SkipEgressRules {
 		n.newResource("EgressInterCluster", &gfnec2.SecurityGroupEgress{
 			GroupId:                    refControlPlaneSG,
 			DestinationSecurityGroupId: refNodeGroupLocalSG,
@@ -306,18 +288,19 @@ func (n *NodeGroupResourceSet) addResourcesForNodeGroup(ctx context.Context) err
 		return errors.Wrap(err, "could not add resources for nodegroup")
 	}

-	if n.spec.SSH != nil && api.IsSetAndNonEmptyString(n.spec.SSH.PublicKeyName) {
-		launchTemplateData.KeyName = gfnt.NewString(*n.spec.SSH.PublicKeyName)
+	ng := n.options.NodeGroup
+	if ng.SSH != nil && api.IsSetAndNonEmptyString(ng.SSH.PublicKeyName) {
+		launchTemplateData.KeyName = gfnt.NewString(*ng.SSH.PublicKeyName)
 	}

-	launchTemplateData.BlockDeviceMappings = makeBlockDeviceMappings(n.spec.NodeGroupBase)
+	launchTemplateData.BlockDeviceMappings = makeBlockDeviceMappings(ng.NodeGroupBase)

 	n.newResource("NodeGroupLaunchTemplate", &gfnec2.LaunchTemplate{
 		LaunchTemplateName: launchTemplateName,
 		LaunchTemplateData: launchTemplateData,
 	})

-	vpcZoneIdentifier, err := AssignSubnets(ctx, n.spec, n.clusterSpec, n.ec2API)
+	vpcZoneIdentifier, err := AssignSubnets(ctx, ng, n.options.ClusterConfig, n.ec2API)
 	if err != nil {
 		return err
 	}
@@ -325,16 +308,16 @@ func (n *NodeGroupResourceSet) addResourcesForNodeGroup(ctx context.Context) err
 	tags := []map[string]string{
 		{
 			"Key":               "Name",
-			"Value":             generateNodeName(n.spec.NodeGroupBase, n.clusterSpec.Metadata),
+			"Value":             generateNodeName(ng.NodeGroupBase, n.options.ClusterConfig.Metadata),
 			"PropagateAtLaunch": "true",
 		},
 		{
-			"Key":               "kubernetes.io/cluster/" + n.clusterSpec.Metadata.Name,
+			"Key":               "kubernetes.io/cluster/" + n.options.ClusterConfig.Metadata.Name,
 			"Value":             "owned",
 			"PropagateAtLaunch": "true",
 		},
 	}
-	if api.IsEnabled(n.spec.IAM.WithAddonPolicies.AutoScaler) {
+	if api.IsEnabled(ng.IAM.WithAddonPolicies.AutoScaler) {
 		tags = append(tags,
 			map[string]string{
 				"Key":               "k8s.io/cluster-autoscaler/enabled",
@@ -342,16 +325,16 @@ func (n *NodeGroupResourceSet) addResourcesForNodeGroup(ctx context.Context) err
 				"PropagateAtLaunch": "true",
 			},
 			map[string]string{
-				"Key":               "k8s.io/cluster-autoscaler/" + n.clusterSpec.Metadata.Name,
+				"Key":               "k8s.io/cluster-autoscaler/" + n.options.ClusterConfig.Metadata.Name,
 				"Value":             "owned",
 				"PropagateAtLaunch": "true",
 			},
 		)
 	}

-	if api.IsEnabled(n.spec.PropagateASGTags) {
+	if api.IsEnabled(ng.PropagateASGTags) {
 		var clusterTags []map[string]string
-		GenerateClusterAutoscalerTags(n.spec, func(key, value string) {
+		GenerateClusterAutoscalerTags(ng, func(key, value string) {
 			clusterTags = append(clusterTags, map[string]string{
 				"Key":               key,
 				"Value":             value,
@@ -364,7 +347,7 @@ func (n *NodeGroupResourceSet) addResourcesForNodeGroup(ctx context.Context) err
 		}
 	}

-	asg := nodeGroupResource(launchTemplateName, vpcZoneIdentifier, tags, n.spec)
+	asg := nodeGroupResource(launchTemplateName, vpcZoneIdentifier, tags, ng)
 	n.newResource("NodeGroup", asg)

 	return nil
@@ -456,22 +439,23 @@ func (n *NodeGroupResourceSet) GetAllOutputs(stack types.Stack) error {
 }

 func newLaunchTemplateData(ctx context.Context, n *NodeGroupResourceSet) (*gfnec2.LaunchTemplate_LaunchTemplateData, error) {
-	userData, err := n.bootstrapper.UserData()
+	userData, err := n.options.Bootstrapper.UserData()
 	if err != nil {
 		return nil, err
 	}

+	ng := n.options.NodeGroup
 	launchTemplateData := &gfnec2.LaunchTemplate_LaunchTemplateData{
 		IamInstanceProfile: &gfnec2.LaunchTemplate_IamInstanceProfile{
 			Arn: n.instanceProfileARN,
 		},
-		ImageId:           gfnt.NewString(n.spec.AMI),
+		ImageId:           gfnt.NewString(ng.AMI),
 		UserData:          gfnt.NewString(userData),
-		MetadataOptions:   makeMetadataOptions(n.spec.NodeGroupBase),
-		TagSpecifications: makeTags(n.spec.NodeGroupBase, n.clusterSpec.Metadata),
+		MetadataOptions:   makeMetadataOptions(ng.NodeGroupBase),
+		TagSpecifications: makeTags(ng.NodeGroupBase, n.options.ClusterConfig.Metadata),
 	}

-	if n.spec.CapacityReservation != nil {
+	if ng.CapacityReservation != nil {
 		valueOrNil := func(value *string) *gfnt.Value {
 			if value != nil {
 				return gfnt.NewString(*value)
@@ -479,20 +463,20 @@ func newLaunchTemplateData(ctx context.Context, n *NodeGroupResourceSet) (*gfnec
 			return nil
 		}
 		launchTemplateData.CapacityReservationSpecification = &gfnec2.LaunchTemplate_CapacityReservationSpecification{}
-		launchTemplateData.CapacityReservationSpecification.CapacityReservationPreference = valueOrNil(n.spec.CapacityReservation.CapacityReservationPreference)
-		if n.spec.CapacityReservation.CapacityReservationTarget != nil {
+		launchTemplateData.CapacityReservationSpecification.CapacityReservationPreference = valueOrNil(ng.CapacityReservation.CapacityReservationPreference)
+		if ng.CapacityReservation.CapacityReservationTarget != nil {
 			launchTemplateData.CapacityReservationSpecification.CapacityReservationTarget = &gfnec2.LaunchTemplate_CapacityReservationTarget{
-				CapacityReservationId:               valueOrNil(n.spec.CapacityReservation.CapacityReservationTarget.CapacityReservationID),
-				CapacityReservationResourceGroupArn: valueOrNil(n.spec.CapacityReservation.CapacityReservationTarget.CapacityReservationResourceGroupARN),
+				CapacityReservationId:               valueOrNil(ng.CapacityReservation.CapacityReservationTarget.CapacityReservationID),
+				CapacityReservationResourceGroupArn: valueOrNil(ng.CapacityReservation.CapacityReservationTarget.CapacityReservationResourceGroupARN),
 			}
 		}
 	}

-	if err := buildNetworkInterfaces(ctx, launchTemplateData, n.spec.InstanceTypeList(), api.IsEnabled(n.spec.EFAEnabled), n.securityGroups, n.ec2API); err != nil {
+	if err := buildNetworkInterfaces(ctx, launchTemplateData, ng.InstanceTypeList(), api.IsEnabled(ng.EFAEnabled), n.securityGroups, n.ec2API); err != nil {
 		return nil, errors.Wrap(err, "couldn't build network interfaces for launch template data")
 	}

-	if api.IsEnabled(n.spec.EFAEnabled) && n.spec.Placement == nil {
+	if api.IsEnabled(ng.EFAEnabled) && ng.Placement == nil {
 		groupName := n.newResource("NodeGroupPlacementGroup", &gfnec2.PlacementGroup{
 			Strategy: gfnt.NewString("cluster"),
 		})
@@ -501,30 +485,30 @@ func newLaunchTemplateData(ctx context.Context, n *NodeGroupResourceSet) (*gfnec
 		}
 	}

-	if !api.HasMixedInstances(n.spec) {
-		launchTemplateData.InstanceType = gfnt.NewString(n.spec.InstanceType)
+	if !api.HasMixedInstances(ng) {
+		launchTemplateData.InstanceType = gfnt.NewString(ng.InstanceType)
 	} else {
-		launchTemplateData.InstanceType = gfnt.NewString(n.spec.InstancesDistribution.InstanceTypes[0])
+		launchTemplateData.InstanceType = gfnt.NewString(ng.InstancesDistribution.InstanceTypes[0])
 	}
-	if n.spec.EBSOptimized != nil {
-		launchTemplateData.EbsOptimized = gfnt.NewBoolean(*n.spec.EBSOptimized)
+	if ng.EBSOptimized != nil {
+		launchTemplateData.EbsOptimized = gfnt.NewBoolean(*ng.EBSOptimized)
 	}

-	if n.spec.CPUCredits != nil {
+	if ng.CPUCredits != nil {
 		launchTemplateData.CreditSpecification = &gfnec2.LaunchTemplate_CreditSpecification{
-			CpuCredits: gfnt.NewString(strings.ToLower(*n.spec.CPUCredits)),
+			CpuCredits: gfnt.NewString(strings.ToLower(*ng.CPUCredits)),
 		}
 	}

-	if n.spec.Placement != nil {
+	if ng.Placement != nil {
 		launchTemplateData.Placement = &gfnec2.LaunchTemplate_Placement{
-			GroupName: gfnt.NewString(n.spec.Placement.GroupName),
+			GroupName: gfnt.NewString(ng.Placement.GroupName),
 		}
 	}

-	if n.spec.EnableDetailedMonitoring != nil {
+	if ng.EnableDetailedMonitoring != nil {
 		launchTemplateData.Monitoring = &gfnec2.LaunchTemplate_Monitoring{
-			Enabled: gfnt.NewBoolean(*n.spec.EnableDetailedMonitoring),
+			Enabled: gfnt.NewBoolean(*ng.EnableDetailedMonitoring),
 		}
 	}
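Note: a small sketch of how the two access-entry knobs are meant to be combined by callers. This mirrors the logic added to pkg/cfn/manager/nodegroup.go later in this diff; the helper name is made up:

    // Hypothetical helper showing the intended combinations:
    //   DisableAccessEntry:         the cluster does not use access entries at all
    //                               (e.g. the aws-auth ConfigMap is still in play).
    //   DisableAccessEntryResource: the nodegroup reuses a pre-existing
    //                               instanceRoleARN, so the entry is created
    //                               out-of-band instead of inside the stack.
    func nodeGroupOptions(cfg *api.ClusterConfig, ng *api.NodeGroup, accessEntriesEnabled bool) builder.NodeGroupOptions {
        return builder.NodeGroupOptions{
            ClusterConfig:              cfg,
            NodeGroup:                  ng,
            DisableAccessEntry:         !accessEntriesEnabled,
            DisableAccessEntryResource: ng.IAM.InstanceRoleARN != "",
        }
    }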
diff --git a/pkg/cfn/builder/nodegroup_access_entry_test.go b/pkg/cfn/builder/nodegroup_access_entry_test.go
index a608941a61..9ab8e9b033 100644
--- a/pkg/cfn/builder/nodegroup_access_entry_test.go
+++ b/pkg/cfn/builder/nodegroup_access_entry_test.go
@@ -5,10 +5,11 @@ import (
 	"os"
 	"path"

-	. "github.com/benjamintf1/unmarshalledmatchers"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"

+	"github.com/benjamintf1/unmarshalledmatchers"
+
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/testutils/mockprovider"

@@ -20,6 +21,7 @@ import (
 var _ = Describe("Nodegroup Builder", func() {
 	type ngResourceTest struct {
 		disableAccessEntryCreation bool
+		disableAccessEntryResource bool
 		resourceFilename           string
 	}

@@ -38,14 +40,15 @@ var _ = Describe("Nodegroup Builder", func() {
 			ForceAddCNIPolicy:          false,
 			VPCImporter:                fakeVPCImporter,
 			SkipEgressRules:            false,
-			DisableAccessEntryCreation: t.disableAccessEntryCreation,
+			DisableAccessEntry:         t.disableAccessEntryCreation,
+			DisableAccessEntryResource: t.disableAccessEntryResource,
 		})
 		Expect(resourceSet.AddAllResources(context.Background())).To(Succeed())
 		actual, err := resourceSet.RenderJSON()
 		Expect(err).NotTo(HaveOccurred())
 		expected, err := os.ReadFile(path.Join("testdata", "nodegroup_access_entry", t.resourceFilename))
 		Expect(err).NotTo(HaveOccurred())
-		Expect(actual).To(MatchOrderedJSON(expected, WithUnorderedListKeys("Tags")))
+		Expect(actual).To(unmarshalledmatchers.MatchOrderedJSON(expected, unmarshalledmatchers.WithUnorderedListKeys("Tags")))
 	},
 		Entry("with access entry", ngResourceTest{
 			resourceFilename: "1.json",
@@ -54,5 +57,9 @@ var _ = Describe("Nodegroup Builder", func() {
 			disableAccessEntryCreation: true,
 			resourceFilename:           "2.json",
 		}),
+		Entry("without access entry resource", ngResourceTest{
+			disableAccessEntryResource: true,
+			resourceFilename:           "3.json",
+		}),
 	)
 })
diff --git a/pkg/cfn/builder/nodegroup_test.go b/pkg/cfn/builder/nodegroup_test.go
index 070dd0634e..42f8df1155 100644
--- a/pkg/cfn/builder/nodegroup_test.go
+++ b/pkg/cfn/builder/nodegroup_test.go
@@ -55,13 +55,13 @@ var _ = Describe("Unmanaged NodeGroup Template Builder", func() {
 	JustBeforeEach(func() {
 		ngrs = builder.NewNodeGroupResourceSet(p.MockEC2(), p.MockIAM(), builder.NodeGroupOptions{
-			ClusterConfig:              cfg,
-			NodeGroup:                  ng,
-			Bootstrapper:               fakeBootstrapper,
-			ForceAddCNIPolicy:          forceAddCNIPolicy,
-			VPCImporter:                fakeVPCImporter,
-			SkipEgressRules:            skipEgressRules,
-			DisableAccessEntryCreation: false,
+			ClusterConfig:      cfg,
+			NodeGroup:          ng,
+			Bootstrapper:       fakeBootstrapper,
+			ForceAddCNIPolicy:  forceAddCNIPolicy,
+			VPCImporter:        fakeVPCImporter,
+			SkipEgressRules:    skipEgressRules,
+			DisableAccessEntry: false,
 		})
 	})
diff --git a/pkg/cfn/builder/testdata/nodegroup_access_entry/3.json b/pkg/cfn/builder/testdata/nodegroup_access_entry/3.json
new file mode 100644
index 0000000000..bcfd6635dd
--- /dev/null
+++ b/pkg/cfn/builder/testdata/nodegroup_access_entry/3.json
@@ -0,0 +1,317 @@
+{
+  "AWSTemplateFormatVersion": "2010-09-09",
+  "Description": "EKS nodes (AMI family: , SSH access: false, private networking: false) [created and managed by eksctl]",
+  "Mappings": {
+    "ServicePrincipalPartitionMap": {
+      "aws": {
+        "EC2": "ec2.amazonaws.com",
+        "EKS": "eks.amazonaws.com",
+        "EKSFargatePods": "eks-fargate-pods.amazonaws.com"
+      },
+      "aws-cn": {
+        "EC2": "ec2.amazonaws.com.cn",
+        "EKS": "eks.amazonaws.com",
+        "EKSFargatePods": "eks-fargate-pods.amazonaws.com"
+      },
+      "aws-iso": {
+        "EC2": "ec2.c2s.ic.gov",
+        "EKS": "eks.amazonaws.com",
+        "EKSFargatePods": "eks-fargate-pods.amazonaws.com"
+      },
+      "aws-iso-b": {
+        "EC2": "ec2.sc2s.sgov.gov",
+        "EKS": "eks.amazonaws.com",
+        "EKSFargatePods": "eks-fargate-pods.amazonaws.com"
+      },
+      "aws-us-gov": {
+        "EC2": "ec2.amazonaws.com",
+        "EKS": "eks.amazonaws.com",
+        "EKSFargatePods": "eks-fargate-pods.amazonaws.com"
+      }
+    }
+  },
+  "Resources": {
+    "EgressInterCluster": {
+      "Type": "AWS::EC2::SecurityGroupEgress",
+      "Properties": {
+        "Description": "Allow control plane to communicate with worker nodes in group (kubelet and workload TCP ports)",
+        "DestinationSecurityGroupId": {
+          "Ref": "SG"
+        },
+        "FromPort": 1025,
+        "IpProtocol": "tcp",
+        "ToPort": 65535
+      }
+    },
+    "EgressInterClusterAPI": {
+      "Type": "AWS::EC2::SecurityGroupEgress",
+      "Properties": {
+        "Description": "Allow control plane to communicate with worker nodes in group (workloads using HTTPS port, commonly used with extension API servers)",
+        "DestinationSecurityGroupId": {
+          "Ref": "SG"
+        },
+        "FromPort": 443,
+        "IpProtocol": "tcp",
+        "ToPort": 443
+      }
+    },
+    "IngressInterClusterCP": {
+      "Type": "AWS::EC2::SecurityGroupIngress",
+      "Properties": {
+        "Description": "Allow control plane to receive API requests from worker nodes in group ",
+        "FromPort": 443,
+        "IpProtocol": "tcp",
+        "SourceSecurityGroupId": {
+          "Ref": "SG"
+        },
+        "ToPort": 443
+      }
+    },
+    "NodeGroup": {
+      "Type": "AWS::AutoScaling::AutoScalingGroup",
+      "Properties": {
+        "LaunchTemplate": {
+          "LaunchTemplateName": {
+            "Fn::Sub": "${AWS::StackName}"
+          },
+          "Version": {
+            "Fn::GetAtt": [
+              "NodeGroupLaunchTemplate",
+              "LatestVersionNumber"
+            ]
+          }
+        },
+        "MaxSize": "2",
+        "MinSize": "2",
+        "Tags": [
+          {
+            "Key": "Name",
+            "PropagateAtLaunch": "true",
+            "Value": "cluster--Node"
+          },
+          {
+            "Key": "kubernetes.io/cluster/cluster",
+            "PropagateAtLaunch": "true",
+            "Value": "owned"
+          }
+        ],
+        "VPCZoneIdentifier": [
+          "subnet-public-us-west-1a"
+        ]
+      },
+      "UpdatePolicy": {
+        "AutoScalingRollingUpdate": {}
+      }
+    },
+    "NodeGroupLaunchTemplate": {
+      "Type": "AWS::EC2::LaunchTemplate",
+      "Properties": {
+        "LaunchTemplateData": {
+          "BlockDeviceMappings": [
+            {
+              "DeviceName": "/dev/xvda",
+              "Ebs": {
+                "VolumeSize": 80,
+                "VolumeType": "gp3"
+              }
+            }
+          ],
+          "IamInstanceProfile": {
+            "Arn": {
+              "Fn::GetAtt": [
+                "NodeInstanceProfile",
+                "Arn"
+              ]
+            }
+          },
+          "ImageId": "",
+          "InstanceType": "",
+          "MetadataOptions": {
+            "HttpPutResponseHopLimit": 2,
+            "HttpTokens": "required"
+          },
+          "NetworkInterfaces": [
+            {
+              "DeviceIndex": 0,
+              "Groups": [
+                null,
+                {
+                  "Ref": "SG"
+                }
+              ],
+              "NetworkCardIndex": 0
+            }
+          ],
+          "TagSpecifications": [
+            {
+              "ResourceType": "instance",
+              "Tags": [
+                {
+                  "Key": "Name",
+                  "Value": "cluster--Node"
+                }
+              ]
+            },
+            {
+              "ResourceType": "volume",
+              "Tags": [
+                {
+                  "Key": "Name",
+                  "Value": "cluster--Node"
+                }
+              ]
+            },
+            {
+              "ResourceType": "network-interface",
+              "Tags": [
+                {
+                  "Key": "Name",
+                  "Value": "cluster--Node"
+                }
+              ]
+            }
+          ],
+          "UserData": ""
+        },
+        "LaunchTemplateName": {
+          "Fn::Sub": "${AWS::StackName}"
+        }
+      }
+    },
+    "NodeInstanceProfile": {
+      "Type": "AWS::IAM::InstanceProfile",
+      "Properties": {
+        "Path": "/",
+        "Roles": [
+          {
+            "Ref": "NodeInstanceRole"
+          }
+        ]
+      }
+    },
+    "NodeInstanceRole": {
+      "Type": "AWS::IAM::Role",
+      "Properties": {
+        "AssumeRolePolicyDocument": {
+          "Statement": [
+            {
+              "Action": [
+                "sts:AssumeRole"
+              ],
+              "Effect": "Allow",
+              "Principal": {
+                "Service": [
+                  {
+                    "Fn::FindInMap": [
+                      "ServicePrincipalPartitionMap",
+                      {
+                        "Ref": "AWS::Partition"
+                      },
+                      "EC2"
+                    ]
+                  }
+                ]
+              }
+            }
+          ],
+          "Version": "2012-10-17"
+        },
+        "ManagedPolicyArns": [
+          {
+            "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+          },
+          {
+            "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+          },
+          {
+            "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/AmazonEKS_CNI_Policy"
+          },
+          {
+            "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
+          }
+        ],
+        "Path": "/",
+        "Tags": [
+          {
+            "Key": "Name",
+            "Value": {
+              "Fn::Sub": "${AWS::StackName}/NodeInstanceRole"
+            }
+          }
+        ]
+      }
+    },
+    "SG": {
+      "Type": "AWS::EC2::SecurityGroup",
+      "Properties": {
+        "GroupDescription": "Communication between the control plane and worker nodes in group ",
+        "SecurityGroupIngress": [
+          {
+            "Description": "[IngressInterCluster] Allow worker nodes in group to communicate with control plane (kubelet and workload TCP ports)",
+            "FromPort": 1025,
+            "IpProtocol": "tcp",
+            "ToPort": 65535
+          },
+          {
+            "Description": "[IngressInterClusterAPI] Allow worker nodes in group to communicate with control plane (workloads using HTTPS port, commonly used with extension API servers)",
+            "FromPort": 443,
+            "IpProtocol": "tcp",
+            "ToPort": 443
+          }
+        ],
+        "Tags": [
+          {
+            "Key": "kubernetes.io/cluster/cluster",
+            "Value": "owned"
+          },
+          {
+            "Key": "Name",
+            "Value": {
+              "Fn::Sub": "${AWS::StackName}/SG"
+            }
+          }
+        ]
+      }
+    }
+  },
+  "Outputs": {
+    "FeatureLocalSecurityGroup": {
+      "Value": true
+    },
+    "FeaturePrivateNetworking": {
+      "Value": false
+    },
+    "FeatureSharedSecurityGroup": {
+      "Value": true
+    },
+    "NodeGroupUsesAccessEntry": {
+      "Value": true
+    },
+    "InstanceProfileARN": {
+      "Value": {
+        "Fn::GetAtt": [
+          "NodeInstanceProfile",
+          "Arn"
+        ]
+      },
+      "Export": {
+        "Name": {
+          "Fn::Sub": "${AWS::StackName}::InstanceProfileARN"
+        }
+      }
+    },
+    "InstanceRoleARN": {
+      "Value": {
+        "Fn::GetAtt": [
+          "NodeInstanceRole",
+          "Arn"
+        ]
+      },
+      "Export": {
+        "Name": {
+          "Fn::Sub": "${AWS::StackName}::InstanceRoleARN"
+        }
+      }
+    }
+  }
+}
diff --git a/pkg/cfn/manager/create_tasks.go b/pkg/cfn/manager/create_tasks.go
index ea0b596d6a..dfdec44f97 100644
--- a/pkg/cfn/manager/create_tasks.go
+++ b/pkg/cfn/manager/create_tasks.go
@@ -4,16 +4,20 @@ import (
 	"context"
 	"fmt"

-	"github.com/kris-nova/logger"
 	"github.com/pkg/errors"
+
+	"github.com/kris-nova/logger"
+
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"

 	"github.com/weaveworks/eksctl/pkg/actions/accessentry"
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
+	"github.com/weaveworks/eksctl/pkg/cfn/builder"
 	iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"
 	"github.com/weaveworks/eksctl/pkg/kubernetes"
+	"github.com/weaveworks/eksctl/pkg/nodebootstrap"
 	"github.com/weaveworks/eksctl/pkg/utils/tasks"
 	"github.com/weaveworks/eksctl/pkg/vpc"
 )
@@ -75,25 +79,26 @@ func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroup
 	return &taskTree
 }

-// NewUnmanagedNodeGroupTask defines tasks required to create all nodegroups.
+// NewUnmanagedNodeGroupTask returns tasks for creating self-managed nodegroups.
 func (c *StackCollection) NewUnmanagedNodeGroupTask(ctx context.Context, nodeGroups []*api.NodeGroup, forceAddCNIPolicy, skipEgressRules, disableAccessEntryCreation bool, vpcImporter vpc.Importer) *tasks.TaskTree {
-	taskTree := &tasks.TaskTree{Parallel: true}
-
-	for _, ng := range nodeGroups {
-		taskTree.Append(&nodeGroupTask{
-			info:                       fmt.Sprintf("create nodegroup %q", ng.NameString()),
-			ctx:                        ctx,
-			nodeGroup:                  ng,
-			stackCollection:            c,
-			forceAddCNIPolicy:          forceAddCNIPolicy,
-			vpcImporter:                vpcImporter,
-			skipEgressRules:            skipEgressRules,
-			disableAccessEntryCreation: disableAccessEntryCreation,
-		})
-		// TODO: move authconfigmap tasks here using kubernetesTask and kubernetes.CallbackClientSet
+	task := &UnmanagedNodeGroupTask{
+		ClusterConfig: c.spec,
+		NodeGroups:    nodeGroups,
+		CreateNodeGroupResourceSet: func(options builder.NodeGroupOptions) NodeGroupResourceSet {
+			return builder.NewNodeGroupResourceSet(c.ec2API, c.iamAPI, options)
+		},
+		NewBootstrapper: func(clusterConfig *api.ClusterConfig, ng *api.NodeGroup) (nodebootstrap.Bootstrapper, error) {
+			return nodebootstrap.NewBootstrapper(clusterConfig, ng)
+		},
+		EKSAPI:       c.eksAPI,
+		StackManager: c,
 	}
-
-	return taskTree
+	return task.Create(ctx, CreateNodeGroupOptions{
+		ForceAddCNIPolicy:          forceAddCNIPolicy,
+		SkipEgressRules:            skipEgressRules,
+		DisableAccessEntryCreation: disableAccessEntryCreation,
+		VPCImporter:                vpcImporter,
+	})
 }

 // NewManagedNodeGroupTask defines tasks required to create managed nodegroups
diff --git a/pkg/cfn/manager/delete_tasks.go b/pkg/cfn/manager/delete_tasks.go
index 8ff9f7f8c4..512ab03c77 100644
--- a/pkg/cfn/manager/delete_tasks.go
+++ b/pkg/cfn/manager/delete_tasks.go
@@ -122,7 +122,7 @@ func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(
 	return taskTree, nil
 }

-// NewTasksToDeleteNodeGroups defines tasks required to delete all of the nodegroups
+// NewTasksToDeleteNodeGroups defines tasks required to delete all nodegroups.
 func (c *StackCollection) NewTasksToDeleteNodeGroups(nodeGroupStacks []NodeGroupStack, shouldDelete func(string) bool, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error) {
 	taskTree := &tasks.TaskTree{Parallel: true}
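Note: for reference, the rough shape of the plan these tasks produce for one nodegroup whose access entry ships inside the stack ("ng-1") and one with a pre-existing instanceRoleARN ("ng-2"). The wording is approximated from TaskTree.Describe, not verbatim output:

    2 parallel tasks: { create nodegroup "ng-1",
        2 sequential tasks: { create nodegroup "ng-2", create access entry for nodegroup "ng-2" }
    }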
diff --git a/pkg/cfn/manager/mocks/NodeGroupResourceSet.go b/pkg/cfn/manager/mocks/NodeGroupResourceSet.go
new file mode 100644
index 0000000000..b7be64ac5a
--- /dev/null
+++ b/pkg/cfn/manager/mocks/NodeGroupResourceSet.go
@@ -0,0 +1,112 @@
+// Code generated by mockery v2.38.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	context "context"
+
+	mock "github.com/stretchr/testify/mock"
+
+	types "github.com/aws/aws-sdk-go-v2/service/cloudformation/types"
+)
+
+// NodeGroupResourceSet is an autogenerated mock type for the NodeGroupResourceSet type
+type NodeGroupResourceSet struct {
+	mock.Mock
+}
+
+// AddAllResources provides a mock function with given fields: ctx
+func (_m *NodeGroupResourceSet) AddAllResources(ctx context.Context) error {
+	ret := _m.Called(ctx)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+		r0 = rf(ctx)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// GetAllOutputs provides a mock function with given fields: _a0
+func (_m *NodeGroupResourceSet) GetAllOutputs(_a0 types.Stack) error {
+	ret := _m.Called(_a0)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(types.Stack) error); ok {
+		r0 = rf(_a0)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// RenderJSON provides a mock function with given fields:
+func (_m *NodeGroupResourceSet) RenderJSON() ([]byte, error) {
+	ret := _m.Called()
+
+	var r0 []byte
+	var r1 error
+	if rf, ok := ret.Get(0).(func() ([]byte, error)); ok {
+		return rf()
+	}
+	if rf, ok := ret.Get(0).(func() []byte); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]byte)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func() error); ok {
+		r1 = rf()
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// WithIAM provides a mock function with given fields:
+func (_m *NodeGroupResourceSet) WithIAM() bool {
+	ret := _m.Called()
+
+	var r0 bool
+	if rf, ok := ret.Get(0).(func() bool); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	return r0
+}
+
+// WithNamedIAM provides a mock function with given fields:
+func (_m *NodeGroupResourceSet) WithNamedIAM() bool {
+	ret := _m.Called()
+
+	var r0 bool
+	if rf, ok := ret.Get(0).(func() bool); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	return r0
+}
+
+// NewNodeGroupResourceSet creates a new instance of NodeGroupResourceSet. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewNodeGroupResourceSet(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *NodeGroupResourceSet {
+	mock := &NodeGroupResourceSet{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/cfn/manager/mocks/NodeGroupStackManager.go b/pkg/cfn/manager/mocks/NodeGroupStackManager.go
new file mode 100644
index 0000000000..f39bdf0837
--- /dev/null
+++ b/pkg/cfn/manager/mocks/NodeGroupStackManager.go
@@ -0,0 +1,48 @@
+// Code generated by mockery v2.38.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	context "context"
+
+	builder "github.com/weaveworks/eksctl/pkg/cfn/builder"
+
+	mock "github.com/stretchr/testify/mock"
+)
+
+// NodeGroupStackManager is an autogenerated mock type for the NodeGroupStackManager type
+type NodeGroupStackManager struct {
+	mock.Mock
+}
+
+// CreateStack provides a mock function with given fields: ctx, stackName, resourceSet, tags, parameters, errs
+func (_m *NodeGroupStackManager) CreateStack(ctx context.Context, stackName string, resourceSet builder.ResourceSetReader, tags map[string]string, parameters map[string]string, errs chan error) error {
+	ret := _m.Called(ctx, stackName, resourceSet, tags, parameters, errs)
+
+	if len(ret) == 0 {
+		panic("no return value specified for CreateStack")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, builder.ResourceSetReader, map[string]string, map[string]string, chan error) error); ok {
+		r0 = rf(ctx, stackName, resourceSet, tags, parameters, errs)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// NewNodeGroupStackManager creates a new instance of NodeGroupStackManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewNodeGroupStackManager(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *NodeGroupStackManager {
+	mock := &NodeGroupStackManager{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/cfn/manager/nodegroup.go b/pkg/cfn/manager/nodegroup.go
index d4373b3376..adb752d1ec 100644
--- a/pkg/cfn/manager/nodegroup.go
+++ b/pkg/cfn/manager/nodegroup.go
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"strings"

+	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
+
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
 	asgtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types"
@@ -17,8 +19,10 @@ import (
 	"github.com/pkg/errors"

 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
+	"github.com/weaveworks/eksctl/pkg/awsapi"
 	"github.com/weaveworks/eksctl/pkg/cfn/builder"
 	"github.com/weaveworks/eksctl/pkg/nodebootstrap"
+	"github.com/weaveworks/eksctl/pkg/utils/tasks"
 	"github.com/weaveworks/eksctl/pkg/version"
 	"github.com/weaveworks/eksctl/pkg/vpc"
 )
@@ -31,30 +35,102 @@ type NodeGroupStack struct {
 	Stack          *Stack
 }

-// makeNodeGroupStackName generates the name of the nodegroup stack identified by its name, isolated by the cluster this StackCollection operates on
-func (c *StackCollection) makeNodeGroupStackName(name string) string {
-	return fmt.Sprintf("eksctl-%s-nodegroup-%s", c.spec.Metadata.Name, name)
+// makeNodeGroupStackName generates the name of the nodegroup stack identified by its name.
+func makeNodeGroupStackName(clusterName, ngName string) string {
+	return fmt.Sprintf("eksctl-%s-nodegroup-%s", clusterName, ngName)
 }

-// createNodeGroupTask creates the nodegroup
-func (c *StackCollection) createNodeGroupTask(ctx context.Context, errs chan error, ng *api.NodeGroup, forceAddCNIPolicy, skipEgressRules, disableAccessEntryCreation bool, vpcImporter vpc.Importer) error {
-	name := c.makeNodeGroupStackName(ng.Name)
+// CreateNodeGroupOptions holds options for creating nodegroup tasks.
+type CreateNodeGroupOptions struct {
+	ForceAddCNIPolicy          bool
+	SkipEgressRules            bool
+	DisableAccessEntryCreation bool
+	VPCImporter                vpc.Importer
+}
+
+// A NodeGroupStackManager creates nodegroup stacks.
+type NodeGroupStackManager interface {
+	// CreateStack creates a CloudFormation stack.
+	CreateStack(ctx context.Context, stackName string, resourceSet builder.ResourceSetReader, tags, parameters map[string]string, errs chan error) error
+}
+
+// A NodeGroupResourceSet creates resources for a nodegroup.
+
+//counterfeiter:generate -o fakes/fake_nodegroup_resource_set.go . NodeGroupResourceSet
+type NodeGroupResourceSet interface {
+	// AddAllResources adds all nodegroup resources.
+	AddAllResources(ctx context.Context) error
+	builder.ResourceSetReader
+}
+
+// CreateNodeGroupResourceSetFunc creates a new NodeGroupResourceSet.
+type CreateNodeGroupResourceSetFunc func(options builder.NodeGroupOptions) NodeGroupResourceSet
+
+// NewBootstrapperFunc creates a new Bootstrapper for ng.
+type NewBootstrapperFunc func(clusterConfig *api.ClusterConfig, ng *api.NodeGroup) (nodebootstrap.Bootstrapper, error)
+
+// UnmanagedNodeGroupTask creates tasks for creating self-managed nodegroups.
+type UnmanagedNodeGroupTask struct {
+	ClusterConfig              *api.ClusterConfig
+	NodeGroups                 []*api.NodeGroup
+	CreateNodeGroupResourceSet CreateNodeGroupResourceSetFunc
+	NewBootstrapper            NewBootstrapperFunc
+	EKSAPI                     awsapi.EKS
+	StackManager               NodeGroupStackManager
+}
+
+// Create creates a TaskTree for creating nodegroups.
+func (t *UnmanagedNodeGroupTask) Create(ctx context.Context, options CreateNodeGroupOptions) *tasks.TaskTree {
+	taskTree := &tasks.TaskTree{Parallel: true}
+
+	for _, ng := range t.NodeGroups {
+		ng := ng
+		createAccessEntryInStack := ng.IAM.InstanceRoleARN == ""
+		createNodeGroupTask := &tasks.GenericTask{
+			Description: fmt.Sprintf("create nodegroup %q", ng.NameString()),
+			Doer: func() error {
+				return t.createNodeGroup(ctx, ng, options, createAccessEntryInStack)
+			},
+		}
+
+		if options.DisableAccessEntryCreation || createAccessEntryInStack {
+			taskTree.Append(createNodeGroupTask)
+		} else {
+			var ngTask tasks.TaskTree
+			ngTask.Append(createNodeGroupTask)
+			ngTask.Append(&tasks.GenericTask{
+				Description: fmt.Sprintf("create access entry for nodegroup %q", ng.NameString()),
+				Doer: func() error {
+					return t.maybeCreateAccessEntry(ctx, ng)
+				},
+			})
+			taskTree.Append(&ngTask)
+		}
+	}
+
+	return taskTree
+}
+
+func (t *UnmanagedNodeGroupTask) createNodeGroup(ctx context.Context, ng *api.NodeGroup, options CreateNodeGroupOptions, createAccessEntryInStack bool) error {
+	name := makeNodeGroupStackName(t.ClusterConfig.Metadata.Name, ng.Name)

 	logger.Info("building nodegroup stack %q", name)
-	bootstrapper, err := nodebootstrap.NewBootstrapper(c.spec, ng)
+	bootstrapper, err := t.NewBootstrapper(t.ClusterConfig, ng)
 	if err != nil {
 		return errors.Wrap(err, "error creating bootstrapper")
 	}
-	stack := builder.NewNodeGroupResourceSet(c.ec2API, c.iamAPI, builder.NodeGroupOptions{
-		ClusterConfig:              c.spec,
+
+	resourceSet := t.CreateNodeGroupResourceSet(builder.NodeGroupOptions{
+		ClusterConfig:              t.ClusterConfig,
 		NodeGroup:                  ng,
 		Bootstrapper:               bootstrapper,
-		ForceAddCNIPolicy:          forceAddCNIPolicy,
-		VPCImporter:                vpcImporter,
-		SkipEgressRules:            skipEgressRules,
-		DisableAccessEntryCreation: disableAccessEntryCreation,
+		ForceAddCNIPolicy:          options.ForceAddCNIPolicy,
+		VPCImporter:                options.VPCImporter,
+		SkipEgressRules:            options.SkipEgressRules,
+		DisableAccessEntry:         options.DisableAccessEntryCreation,
+		DisableAccessEntryResource: !createAccessEntryInStack,
 	})
-	if err := stack.AddAllResources(ctx); err != nil {
+	if err := resourceSet.AddAllResources(ctx); err != nil {
 		return err
 	}

@@ -65,7 +141,38 @@ func (c *StackCollection) createNodeGroupTask(ctx context.Context, errs chan err
 	ng.Tags[api.OldNodeGroupNameTag] = ng.Name
 	ng.Tags[api.NodeGroupTypeTag] = string(api.NodeGroupTypeUnmanaged)

-	return c.CreateStack(ctx, name, stack, ng.Tags, nil, errs)
+	errCh := make(chan error)
+	if err := t.StackManager.CreateStack(ctx, name, resourceSet, ng.Tags, nil, errCh); err != nil {
+		return err
+	}
+	return <-errCh
+}
+
+func (t *UnmanagedNodeGroupTask) maybeCreateAccessEntry(ctx context.Context, ng *api.NodeGroup) error {
+	roleARN := ng.IAM.InstanceRoleARN
+	_, err := t.EKSAPI.CreateAccessEntry(ctx, &eks.CreateAccessEntryInput{
+		ClusterName:  aws.String(t.ClusterConfig.Metadata.Name),
+		PrincipalArn: aws.String(roleARN),
+		Type:         aws.String(string(api.GetAccessEntryType(ng))),
+		Tags: map[string]string{
+			api.ClusterNameLabel: t.ClusterConfig.Metadata.Name,
+		},
+	})
+	if err != nil {
+		var resourceInUse *ekstypes.ResourceInUseException
+		if errors.As(err, &resourceInUse) {
+			logger.Info("nodegroup %s: access entry for principal ARN %q already exists", ng.Name, roleARN)
+			return nil
+		}
+		return fmt.Errorf("creating access entry for nodegroup %s: %w", ng.Name, err)
+	}
+	logger.Info("nodegroup %s: created access entry for principal ARN %q", ng.Name, roleARN)
+	return nil
+}
+
+// makeNodeGroupStackName generates the name of the nodegroup stack identified by its name and this StackCollection's cluster.
+func (c *StackCollection) makeNodeGroupStackName(ngName string) string {
+	return makeNodeGroupStackName(c.spec.Metadata.Name, ngName)
 }

 func (c *StackCollection) createManagedNodeGroupTask(ctx context.Context, errorCh chan error, ng *api.ManagedNodeGroup, forceAddCNIPolicy bool, vpcImporter vpc.Importer) error {
diff --git a/pkg/cfn/manager/tasks.go b/pkg/cfn/manager/tasks.go
index 59d719b882..05be123cf8 100644
--- a/pkg/cfn/manager/tasks.go
+++ b/pkg/cfn/manager/tasks.go
@@ -26,22 +26,6 @@ func (t *createClusterTask) Do(errorCh chan error) error {
 	return t.stackCollection.createClusterTask(t.ctx, errorCh, t.supportsManagedNodes)
 }

-type nodeGroupTask struct {
-	info                       string
-	ctx                        context.Context
-	nodeGroup                  *api.NodeGroup
-	forceAddCNIPolicy          bool
-	disableAccessEntryCreation bool
-	skipEgressRules            bool
-	vpcImporter                vpc.Importer
-	stackCollection            *StackCollection
-}
-
-func (t *nodeGroupTask) Describe() string { return t.info }
-func (t *nodeGroupTask) Do(errs chan error) error {
-	return t.stackCollection.createNodeGroupTask(t.ctx, errs, t.nodeGroup, t.forceAddCNIPolicy, t.skipEgressRules, t.disableAccessEntryCreation, t.vpcImporter)
-}
-
 type managedNodeGroupTask struct {
 	info      string
 	nodeGroup *api.ManagedNodeGroup
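Note: worth calling out from the hunk above — StackCollection.CreateStack reports completion on the error channel it is handed, so createNodeGroup turns the asynchronous call into a synchronous one by blocking on that channel. A stripped-down sketch of the pattern, with generic names:

    // Generic form of the synchronous wrapper used above.
    func createSync(ctx context.Context, create func(ctx context.Context, errs chan error) error) error {
        errCh := make(chan error)
        if err := create(ctx, errCh); err != nil {
            return err // failed to even start
        }
        return <-errCh // nil (or an error) once the operation finishes
    }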
fakeVPCImporter) + tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("bar", "foo"), false, false, true, fakeVPCImporter) Expect(tasks.Describe()).To(Equal(` 2 parallel tasks: { create nodegroup "bar", create nodegroup "foo" } `)) } { - tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("bar"), false, false, false, fakeVPCImporter) + tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("bar"), false, false, true, fakeVPCImporter) Expect(tasks.Describe()).To(Equal(`1 task: { create nodegroup "bar" }`)) } { - tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("foo"), false, false, false, fakeVPCImporter) + tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("foo"), false, false, true, fakeVPCImporter) Expect(tasks.Describe()).To(Equal(`1 task: { create nodegroup "foo" }`)) } { - tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), nil, false, false, false, fakeVPCImporter) + tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), nil, false, false, true, fakeVPCImporter) Expect(tasks.Describe()).To(Equal(`no tasks`)) } + + accessConfig := &api.AccessConfig{ + AuthenticationMode: ekstypes.AuthenticationModeConfigMap, + } { tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), nil, accessConfig, nil) Expect(tasks.Describe()).To(Equal(` diff --git a/pkg/cfn/manager/unmanaged_nodegroup_test.go b/pkg/cfn/manager/unmanaged_nodegroup_test.go new file mode 100644 index 0000000000..2db8b4cb5b --- /dev/null +++ b/pkg/cfn/manager/unmanaged_nodegroup_test.go @@ -0,0 +1,215 @@ +package manager_test + +import ( + "context" + "fmt" + + "github.com/weaveworks/eksctl/pkg/cfn/manager/mocks" + + "github.com/stretchr/testify/mock" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/weaveworks/eksctl/pkg/nodebootstrap" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/cfn/builder" + "github.com/weaveworks/eksctl/pkg/cfn/manager" + bootstrapfakes "github.com/weaveworks/eksctl/pkg/nodebootstrap/fakes" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" +) + +var _ = Describe("Unmanaged Nodegroup Task", func() { + + type resourceSetOptions struct { + nodeGroupName string + disableAccessEntryResource bool + } + + type ngEntry struct { + nodeGroups []*api.NodeGroup + mockCalls func(*mockprovider.MockProvider, *mocks.NodeGroupStackManager) + + expectedResourceSetOptions []resourceSetOptions + + expectedErr string + } + + newNodeGroups := func(instanceRoleARNs ...string) []*api.NodeGroup { + var nodeGroups []*api.NodeGroup + for i, roleARN := range instanceRoleARNs { + ng := api.NewNodeGroup() + ng.Name = fmt.Sprintf("ng-%d", i+1) + ng.IAM.InstanceRoleARN = roleARN + nodeGroups = append(nodeGroups, ng) + } + return nodeGroups + } + + const clusterName = "cluster" + + makeAccessEntryInput := func(roleName string) *eks.CreateAccessEntryInput { + return &eks.CreateAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(roleName), + Type: aws.String("EC2_LINUX"), + Tags: map[string]string{ + api.ClusterNameLabel: clusterName, + }, + } + } + + DescribeTable("Create", func(e ngEntry) { + clusterConfig := api.NewClusterConfig() + clusterConfig.Metadata.Name = clusterName + clusterConfig.NodeGroups = e.nodeGroups + + var ( + provider = mockprovider.NewMockProvider() + stackManager mocks.NodeGroupStackManager + resourceSetArgs []resourceSetOptions + nodeGroupResourceSet mocks.NodeGroupResourceSet + ) + nodeGroupResourceSet.On("AddAllResources", mock.Anything).Return(nil).Times(len(clusterConfig.NodeGroups)) + + t := &manager.UnmanagedNodeGroupTask{ + ClusterConfig: clusterConfig, + NodeGroups: clusterConfig.NodeGroups, + CreateNodeGroupResourceSet: func(options builder.NodeGroupOptions) manager.NodeGroupResourceSet { + resourceSetArgs = append(resourceSetArgs, resourceSetOptions{ + nodeGroupName: options.NodeGroup.Name, + disableAccessEntryResource: options.DisableAccessEntryResource, + }) + return &nodeGroupResourceSet + }, + NewBootstrapper: func(_ *api.ClusterConfig, _ *api.NodeGroup) (nodebootstrap.Bootstrapper, error) { + var bootstrapper bootstrapfakes.FakeBootstrapper + bootstrapper.UserDataReturns("", nil) + return &bootstrapper, nil + }, + EKSAPI: provider.EKS(), + StackManager: &stackManager, + } + + stackManager.On("CreateStack", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.MatchedBy(func(errs chan error) bool { + close(errs) + return true + })).Return(nil) + + if e.mockCalls != nil { + e.mockCalls(provider, &stackManager) + } + + taskTree := t.Create(context.Background(), manager.CreateNodeGroupOptions{ + ForceAddCNIPolicy: false, + SkipEgressRules: false, + DisableAccessEntryCreation: false, + }) + errs := taskTree.DoAllSync() + if e.expectedErr != "" { + Expect(errs).To(ContainElement(MatchError(ContainSubstring(e.expectedErr)))) + return + } + + Expect(errs).To(HaveLen(0)) + Expect(e.expectedResourceSetOptions).To(ConsistOf(resourceSetArgs)) + mock.AssertExpectationsForObjects(GinkgoT(), provider.MockEKS(), &stackManager, &nodeGroupResourceSet) + }, + Entry("nodegroups with no instanceRoleARN set", ngEntry{ + nodeGroups: newNodeGroups("", ""), + expectedResourceSetOptions: []resourceSetOptions{ + { + 
nodeGroupName: "ng-1", + disableAccessEntryResource: false, + }, + { + nodeGroupName: "ng-2", + disableAccessEntryResource: false, + }, + }, + }), + + Entry("some nodegroups with instanceRoleARN set", ngEntry{ + nodeGroups: newNodeGroups("", "role-1", ""), + mockCalls: func(provider *mockprovider.MockProvider, stackManager *mocks.NodeGroupStackManager) { + provider.MockEKS().On("CreateAccessEntry", mock.Anything, makeAccessEntryInput("role-1")).Return(&eks.CreateAccessEntryOutput{}, nil) + }, + expectedResourceSetOptions: []resourceSetOptions{ + { + nodeGroupName: "ng-1", + disableAccessEntryResource: false, + }, + { + nodeGroupName: "ng-2", + disableAccessEntryResource: true, + }, + { + nodeGroupName: "ng-3", + disableAccessEntryResource: false, + }, + }, + }), + + Entry("all nodegroups with instanceRoleARN set", ngEntry{ + nodeGroups: newNodeGroups("role-1", "role-2"), + mockCalls: func(provider *mockprovider.MockProvider, stackManager *mocks.NodeGroupStackManager) { + for _, role := range []string{"role-1", "role-2"} { + provider.MockEKS().On("CreateAccessEntry", mock.Anything, makeAccessEntryInput(role)).Return(&eks.CreateAccessEntryOutput{}, nil).Once() + } + + }, + expectedResourceSetOptions: []resourceSetOptions{ + { + nodeGroupName: "ng-1", + disableAccessEntryResource: true, + }, + { + nodeGroupName: "ng-2", + disableAccessEntryResource: true, + }, + }, + }), + + Entry("all nodegroups with the same instanceRoleARN", ngEntry{ + nodeGroups: newNodeGroups("role-3", "role-3"), + mockCalls: func(provider *mockprovider.MockProvider, stackManager *mocks.NodeGroupStackManager) { + provider.MockEKS().On("CreateAccessEntry", mock.Anything, makeAccessEntryInput("role-3")).Return(&eks.CreateAccessEntryOutput{}, nil).Once() + provider.MockEKS().On("CreateAccessEntry", mock.Anything, makeAccessEntryInput("role-3")).Return(nil, &ekstypes.ResourceInUseException{ + ClusterName: aws.String(clusterName), + }).Once() + + }, + expectedResourceSetOptions: []resourceSetOptions{ + { + nodeGroupName: "ng-1", + disableAccessEntryResource: true, + }, + { + nodeGroupName: "ng-2", + disableAccessEntryResource: true, + }, + }, + }), + + Entry("single nodegroup with a pre-existing access entry", ngEntry{ + nodeGroups: newNodeGroups("role-3"), + mockCalls: func(provider *mockprovider.MockProvider, stackManager *mocks.NodeGroupStackManager) { + provider.MockEKS().On("CreateAccessEntry", mock.Anything, makeAccessEntryInput("role-3")).Return(&eks.CreateAccessEntryOutput{}, &ekstypes.ResourceInUseException{ + ClusterName: aws.String(clusterName), + }).Once() + + }, + expectedResourceSetOptions: []resourceSetOptions{ + { + nodeGroupName: "ng-1", + disableAccessEntryResource: true, + }, + }, + }), + ) + +}) diff --git a/pkg/ctl/create/cluster_test.go b/pkg/ctl/create/cluster_test.go index b67d6b8a76..4c2d388771 100644 --- a/pkg/ctl/create/cluster_test.go +++ b/pkg/ctl/create/cluster_test.go @@ -327,9 +327,7 @@ var _ = Describe("create cluster", func() { updateClusterParams: func(params *cmdutils.CreateClusterCmdParams) { params.InstallNvidiaDevicePlugin = true }, - updateMocks: func(p *mockprovider.MockProvider) { - updateMocksForNodegroups(cftypes.StackStatusCreateComplete, defaultOutputForNodeGroup)(p) - }, + updateMocks: updateMocksForNodegroups(cftypes.StackStatusCreateComplete, defaultOutputForNodeGroup), updateKubeProvider: func(fk *fakes.FakeKubeProvider) { rawClient, err := kubernetes.NewRawClient(kubefake.NewSimpleClientset(), &rest.Config{}) Expect(err).To(Not(HaveOccurred())) @@ -454,6 +452,41 @@ var _ = 
Describe("create cluster", func() { }, }), + Entry("nodegroup with an instance role ARN", createClusterEntry{ + updateClusterConfig: func(c *api.ClusterConfig) { + ng := getDefaultNodeGroup() + ng.IAM.InstanceRoleARN = "role-1" + c.NodeGroups = []*api.NodeGroup{ng} + }, + updateMocks: func(mp *mockprovider.MockProvider) { + updateMocksForNodegroups(cftypes.StackStatusCreateComplete, defaultOutputForNodeGroup)(mp) + mp.MockEKS().On("DescribeAccessEntry", mock.Anything, &awseks.DescribeAccessEntryInput{ + PrincipalArn: aws.String("role-1"), + ClusterName: aws.String(clusterName), + }).Return(nil, &ekstypes.ResourceNotFoundException{ClusterName: aws.String(clusterName)}).Once() + + mp.MockEKS().On("CreateAccessEntry", mock.Anything, &awseks.CreateAccessEntryInput{ + PrincipalArn: aws.String("role-1"), + ClusterName: aws.String(clusterName), + Type: aws.String("EC2_LINUX"), + Tags: map[string]string{ + api.ClusterNameLabel: clusterName, + }, + }).Return(&awseks.CreateAccessEntryOutput{}, nil).Once() + }, + updateKubeProvider: func(fk *fakes.FakeKubeProvider) { + node := getDefaultNode() + clientset := kubefake.NewSimpleClientset(node) + watcher := watch.NewFake() + go func() { + defer watcher.Stop() + watcher.Add(node) + }() + clientset.PrependWatchReactor("nodes", k8stest.DefaultWatchReactor(watcher, nil)) + fk.NewStdClientSetReturns(clientset, nil) + }, + }), + Entry("standard cluster", createClusterEntry{}), Entry("[Cluster with Karpenter] installs Karpenter successfully", createClusterEntry{ @@ -720,15 +753,16 @@ var _ = Describe("create cluster", func() { }) var ( - clusterName = "my-cluster" - clusterStackName = "eksctl-" + clusterName + "-cluster" - nodeGroupName = "my-nodegroup" - nodeGroupStackName = "eksctl-" + clusterName + "-nodegroup-" + nodeGroupName + clusterName = "my-cluster" + clusterStackName = "eksctl-" + clusterName + "-cluster" + nodeGroupName = "my-nodegroup" + nodeGroupStackName = "eksctl-" + clusterName + "-nodegroup-" + nodeGroupName + nodeInstanceRoleARN = "arn:aws:iam::083751696308:role/eksctl-my-cluster-cluster-nodegroup-my-nodegroup-NodeInstanceRole-1IYQ3JS8OKPX1" defaultOutputForNodeGroup = []cftypes.Output{ { OutputKey: aws.String(outputs.NodeGroupInstanceRoleARN), - OutputValue: aws.String("arn:aws:iam::083751696308:role/eksctl-my-cluster-cluster-nodegroup-my-nodegroup-NodeInstanceRole-1IYQ3JS8OKPX1"), + OutputValue: aws.String(nodeInstanceRoleARN), }, { OutputKey: aws.String(outputs.NodeGroupInstanceProfileARN), @@ -887,7 +921,7 @@ var ( Outputs: outputs, }, }, - }, nil).Once() + }, nil).Twice() } } ) diff --git a/pkg/ctl/delete/nodegroup.go b/pkg/ctl/delete/nodegroup.go index dd86296639..e35277ae10 100644 --- a/pkg/ctl/delete/nodegroup.go +++ b/pkg/ctl/delete/nodegroup.go @@ -164,7 +164,7 @@ func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, options deleteNodeG cmdutils.LogIntendedAction(cmd.Plan, "delete %d nodegroups from cluster %q", len(allNodeGroups), cfg.Metadata.Name) deleter := &nodegroup.Deleter{ - StackHelper: ctl.NewStackManager(cfg), + StackHelper: stackManager, NodeGroupDeleter: ctl.AWSProvider.EKS(), ClusterName: cfg.Metadata.Name, AuthConfigMapUpdater: &authConfigMapUpdater{ diff --git a/userdocs/src/usage/access-entries.md b/userdocs/src/usage/access-entries.md index f0d6506556..efac8fab29 100644 --- a/userdocs/src/usage/access-entries.md +++ b/userdocs/src/usage/access-entries.md @@ -22,7 +22,7 @@ e.g. 
```yaml
accessConfig:
- authenticationMode: <>
+ authenticationMode: <>
```

When creating a new cluster with access entries using `eksctl`, if `authenticationMode` is not provided by the user, it is automatically set to `API_AND_CONFIG_MAP`, so the access entries API is enabled by default. If you instead want to use access entries on a pre-existing cluster that was not created by eksctl and that uses the `CONFIG_MAP` option, you will first need to set `authenticationMode` to `API_AND_CONFIG_MAP`. For that, `eksctl` has introduced a new command for updating the cluster authentication mode, which works both with CLI flags, e.g.

@@ -85,6 +85,9 @@ Each access entry has a type. For authorizing self-managed nodegroups, `eksctl`

When creating your own access entries, you can also specify `EC2_LINUX` (for an IAM role used with Linux or Bottlerocket self-managed nodes), `EC2_WINDOWS` (for an IAM role used with Windows self-managed nodes), `FARGATE_LINUX` (for an IAM role used with AWS Fargate), or `STANDARD` as a type. If you don't specify a type, it defaults to `STANDARD`.

+???+ note
+    When deleting a nodegroup created with a pre-existing `instanceRoleARN`, it is the user's responsibility to delete the corresponding access entry once no nodegroups are associated with it any longer. This is because eksctl does not attempt to determine whether an access entry is still in use by self-managed nodegroups that were not created by eksctl.
+
 ## Managing access entries

 ### Create access entries
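
Since this excerpt ends at the *Create access entries* heading, here is a brief sketch of what an explicitly typed access entry can look like in a config file. It assumes the `accessEntries` schema referenced in this page (`principalARN`, `type`); the ARN is a placeholder:

```yaml
# Hypothetical example: an explicit access entry with a type.
accessEntries:
  - principalARN: arn:aws:iam::111122223333:role/self-managed-node-role  # placeholder ARN
    type: EC2_LINUX  # one of EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX, STANDARD
```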
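
To make the note about pre-existing `instanceRoleARN`s concrete, below is a minimal config sketch for a self-managed nodegroup that reuses a pre-existing instance role via `iam.instanceRoleARN` — the case where, per this change, eksctl creates the access entry through the EKS API rather than as a resource in the nodegroup's CloudFormation stack, and does not delete it with the nodegroup. The cluster name, region, and role ARN are placeholders:

```yaml
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig

metadata:
  name: my-cluster   # placeholder
  region: us-west-2  # placeholder

nodeGroups:
  - name: with-instance-role-arn
    desiredCapacity: 1
    iam:
      # Pre-existing role: eksctl creates an access entry for this principal
      # via the EKS CreateAccessEntry API; deleting the nodegroup leaves the
      # access entry in place.
      instanceRoleARN: arn:aws:iam::111122223333:role/my-node-role  # placeholder
```

Deleting this nodegroup removes its stack, but per the note above, the access entry for the role must be cleaned up manually once no other nodegroups use it.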