diff --git a/go.mod b/go.mod
index fd6b4fac5c..c416789a65 100644
--- a/go.mod
+++ b/go.mod
@@ -54,7 +54,7 @@ require (
     github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect
     github.com/vektra/mockery v1.1.2
     github.com/voxelbrain/goptions v0.0.0-20180630082107-58cddc247ea2 // indirect
-    github.com/weaveworks/goformation/v4 v4.10.2-0.20210512074147-4fd9b878d08c
+    github.com/weaveworks/goformation/v4 v4.10.2-0.20210512152024-6bd11e940025
     github.com/weaveworks/launcher v0.0.2-0.20200715141516-1ca323f1de15
     github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0
     golang.org/x/tools v0.1.0
diff --git a/go.sum b/go.sum
index afb34b94ed..fbf6e2d5db 100644
--- a/go.sum
+++ b/go.sum
@@ -1412,8 +1412,8 @@ github.com/voxelbrain/goptions v0.0.0-20180630082107-58cddc247ea2 h1:txplJASvd6b
 github.com/voxelbrain/goptions v0.0.0-20180630082107-58cddc247ea2/go.mod h1:DGCIhurYgnLz8J9ga1fMV/fbLDyUvTyrWXVWUIyJon4=
 github.com/weaveworks/goformation/v4 v4.10.2-0.20210510110359-567e61bbcf4c h1:vFyc7sCxT0Pcj5wPH8uR3dudRe0MSCQXBTMolLmquCU=
 github.com/weaveworks/goformation/v4 v4.10.2-0.20210510110359-567e61bbcf4c/go.mod h1:x92o12+Azh6DQ4yoXT5oEuE7dhQHR5V2vy/fmZ6pO7k=
-github.com/weaveworks/goformation/v4 v4.10.2-0.20210512074147-4fd9b878d08c h1:Qh6SvtJbxURTa9nvljYGDxp5j8iVEF6za6LbkamD7Z8=
-github.com/weaveworks/goformation/v4 v4.10.2-0.20210512074147-4fd9b878d08c/go.mod h1:x92o12+Azh6DQ4yoXT5oEuE7dhQHR5V2vy/fmZ6pO7k=
+github.com/weaveworks/goformation/v4 v4.10.2-0.20210512152024-6bd11e940025 h1:XX7Ch1ZuYef1D+M7EOr2tKc/8Y7quheNQJK36SMFWqw=
+github.com/weaveworks/goformation/v4 v4.10.2-0.20210512152024-6bd11e940025/go.mod h1:x92o12+Azh6DQ4yoXT5oEuE7dhQHR5V2vy/fmZ6pO7k=
 github.com/weaveworks/launcher v0.0.2-0.20200715141516-1ca323f1de15 h1:i/RhLevywqC6cuUWtGdoaNrsJd+/zWh3PXbkXZIyZsU=
 github.com/weaveworks/launcher v0.0.2-0.20200715141516-1ca323f1de15/go.mod h1:w9Z1vnQmPobkEZ0F3oyiqRYP+62qDqTGnK6t5uhe1kg=
 github.com/weaveworks/mesh v0.0.0-20170419100114-1f158d31de55/go.mod h1:mcON9Ws1aW0crSErpXWp7U1ErCDEKliDX2OhVlbWRKk=
diff --git a/pkg/actions/cluster/owned.go b/pkg/actions/cluster/owned.go
index 4d5ab9f1b4..e0dfd09167 100644
--- a/pkg/actions/cluster/owned.go
+++ b/pkg/actions/cluster/owned.go
@@ -57,7 +57,8 @@ func (c *OwnedCluster) Upgrade(dryRun bool) error {
         return err
     }
 
-    if err := c.ctl.ValidateExistingNodeGroupsForCompatibility(c.cfg, c.stackManager); err != nil {
+    nodeGroupService := eks.NodeGroupService{Provider: c.ctl.Provider}
+    if err := nodeGroupService.ValidateExistingNodeGroupsForCompatibility(c.cfg, c.stackManager); err != nil {
         logger.Critical("failed checking nodegroups", err.Error())
     }
 
diff --git a/pkg/actions/nodegroup/create.go b/pkg/actions/nodegroup/create.go
index aa0fe605fc..ba8f8b5891 100644
--- a/pkg/actions/nodegroup/create.go
+++ b/pkg/actions/nodegroup/create.go
@@ -5,7 +5,6 @@ import (
     "os"
     "strings"
 
-    "github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector"
     "github.com/kris-nova/logger"
     "github.com/pkg/errors"
 
@@ -20,7 +19,6 @@ import (
     "github.com/weaveworks/eksctl/pkg/vpc"
 
     api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
-    "github.com/weaveworks/eksctl/pkg/authconfigmap"
     "github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
 )
 
@@ -34,24 +32,22 @@ type CreateOpts struct {
     ConfigFileProvided bool
 }
 
-func (m *Manager) Create(options CreateOpts, nodegroupFilter filter.NodeGroupFilter) error {
+func (m *Manager) Create(options CreateOpts, nodegroupFilter filter.NodegroupFilter) error {
     cfg := m.cfg
     meta := cfg.Metadata
     ctl := m.ctl
-
-    // For dry-run.
-    clusterConfigCopy := cfg.DeepCopy()
+    kubeProvider := m.kubeProvider
 
     if err := checkVersion(ctl, meta); err != nil {
         return err
     }
 
-    if err := checkARMSupport(ctl, m.clientSet, cfg, options.SkipOutdatedAddonsCheck); err != nil {
+    if err := m.checkARMSupport(ctl, m.clientSet, cfg, options.SkipOutdatedAddonsCheck); err != nil {
         return err
     }
 
     var isOwnedCluster = true
-    if err := ctl.LoadClusterIntoSpecFromStack(cfg, m.stackManager); err != nil {
+    if err := kubeProvider.LoadClusterIntoSpecFromStack(cfg, m.stackManager); err != nil {
         switch e := err.(type) {
         case *manager.StackNotFoundErr:
             logger.Warning("%s, will attempt to create nodegroup(s) on non eksctl-managed cluster", e.Error())
@@ -66,7 +62,7 @@ func (m *Manager) Create(options CreateOpts, nodegroupFilter filter.NodeGroupFil
     }
 
     // EKS 1.14 clusters created with prior versions of eksctl may not support Managed Nodes
-    supportsManagedNodes, err := ctl.SupportsManagedNodes(cfg)
+    supportsManagedNodes, err := kubeProvider.SupportsManagedNodes(cfg)
     if err != nil {
         return err
     }
@@ -79,14 +75,15 @@ func (m *Manager) Create(options CreateOpts, nodegroupFilter filter.NodeGroupFil
         return err
     }
 
-    nodeGroupService := eks.NewNodeGroupService(ctl.Provider, selector.New(ctl.Provider.Session()))
+    m.init.NewAWSSelectorSession(ctl.Provider)
     nodePools := cmdutils.ToNodePools(cfg)
-    if err := nodeGroupService.ExpandInstanceSelectorOptions(nodePools, cfg.AvailabilityZones); err != nil {
+
+    if err := m.init.ExpandInstanceSelectorOptions(nodePools, cfg.AvailabilityZones); err != nil {
         return err
     }
     if !options.DryRun {
-        if err := nodeGroupService.Normalize(nodePools, cfg.Metadata); err != nil {
+        if err := m.init.Normalize(nodePools, cfg.Metadata); err != nil {
             return err
         }
     }
@@ -97,100 +94,102 @@ func (m *Manager) Create(options CreateOpts, nodegroupFilter filter.NodeGroupFil
     }
 
     if isOwnedCluster {
-        if err := ctl.ValidateClusterForCompatibility(cfg, m.stackManager); err != nil {
+        if err := kubeProvider.ValidateClusterForCompatibility(cfg, m.stackManager); err != nil {
             return errors.Wrap(err, "cluster compatibility check failed")
         }
     }
 
-    if err := vpc.ValidateLegacySubnetsForNodeGroups(cfg, ctl.Provider); err != nil {
+    if err := m.init.ValidateLegacySubnetsForNodeGroups(cfg, ctl.Provider); err != nil {
         return err
     }
 
-    {
-        if err := nodegroupFilter.SetOnlyLocal(ctl.Provider.EKS(), m.stackManager, cfg); err != nil {
-            return err
-        }
+    if err := m.nodeCreationTasks(options, nodegroupFilter, supportsManagedNodes, isOwnedCluster); err != nil {
+        return err
+    }
 
-        logFiltered := cmdutils.ApplyFilter(cfg, &nodegroupFilter)
-        logFiltered()
-        logMsg := func(resource string, count int) {
-            logger.Info("will create a CloudFormation stack for each of %d %s in cluster %q", count, resource, meta.Name)
-        }
-        if len(cfg.NodeGroups) > 0 {
-            logMsg("nodegroups", len(cfg.NodeGroups))
-        }
+    if err := m.postNodeCreationTasks(m.clientSet, options); err != nil {
+        return err
+    }
 
-        if len(cfg.ManagedNodeGroups) > 0 {
-            logMsg("managed nodegroups", len(cfg.ManagedNodeGroups))
-        }
+    if err := m.init.ValidateExistingNodeGroupsForCompatibility(cfg, m.stackManager); err != nil {
+        logger.Critical("failed checking nodegroups", err.Error())
+    }
 
-        if options.DryRun {
-            // Set filtered nodegroups
-            clusterConfigCopy.NodeGroups = cfg.NodeGroups
-            clusterConfigCopy.ManagedNodeGroups = cfg.ManagedNodeGroups
-            if options.ConfigFileProvided {
-                return cmdutils.PrintDryRunConfig(clusterConfigCopy, os.Stdout)
-            }
-            return cmdutils.PrintNodeGroupDryRunConfig(clusterConfigCopy, os.Stdout)
-        }
+    return nil
+}
 
-        taskTree := &tasks.TaskTree{
-            Parallel: false,
-        }
+func (m *Manager) nodeCreationTasks(options CreateOpts, nodegroupFilter filter.NodegroupFilter, supportsManagedNodes, isOwnedCluster bool) error {
+    cfg := m.cfg
+    meta := cfg.Metadata
+    init := m.init
 
-        if supportsManagedNodes && isOwnedCluster {
-            taskTree.Append(m.stackManager.NewClusterCompatTask())
-        }
+    if err := nodegroupFilter.SetOnlyLocal(m.ctl.Provider.EKS(), m.stackManager, cfg); err != nil {
+        return err
+    }
 
-        awsNodeUsesIRSA, err := eks.DoesAWSNodeUseIRSA(ctl.Provider, m.clientSet)
-        if err != nil {
-            return errors.Wrap(err, "couldn't check aws-node for annotation")
-        }
+    logFiltered := cmdutils.ApplyFilter(cfg, nodegroupFilter)
+    logFiltered()
+    logMsg := func(resource string, count int) {
+        logger.Info("will create a CloudFormation stack for each of %d %s in cluster %q", count, resource, meta.Name)
+    }
+    if len(m.cfg.NodeGroups) > 0 {
+        logMsg("nodegroups", len(cfg.NodeGroups))
+    }
 
-        if !awsNodeUsesIRSA && api.IsEnabled(cfg.IAM.WithOIDC) {
-            logger.Debug("cluster has withOIDC enabled but is not using IRSA for CNI, will add CNI policy to node role")
-        }
+    if len(m.cfg.ManagedNodeGroups) > 0 {
+        logMsg("managed nodegroups", len(cfg.ManagedNodeGroups))
+    }
 
-        var vpcImporter vpc.Importer
-        if isOwnedCluster {
-            vpcImporter = vpc.NewStackConfigImporter(m.stackManager.MakeClusterStackName())
-        } else {
-            vpcImporter = vpc.NewSpecConfigImporter(*ctl.Status.ClusterInfo.Cluster.ResourcesVpcConfig.ClusterSecurityGroupId, cfg.VPC)
+    if options.DryRun {
+        clusterConfigCopy := cfg.DeepCopy()
+        // Set filtered nodegroups
+        clusterConfigCopy.NodeGroups = cfg.NodeGroups
+        clusterConfigCopy.ManagedNodeGroups = cfg.ManagedNodeGroups
+        if options.ConfigFileProvided {
+            return cmdutils.PrintDryRunConfig(clusterConfigCopy, os.Stdout)
         }
+        return cmdutils.PrintNodeGroupDryRunConfig(clusterConfigCopy, os.Stdout)
+    }
 
-        allNodeGroupTasks := &tasks.TaskTree{
-            Parallel: true,
-        }
-        nodeGroupTasks := m.stackManager.NewUnmanagedNodeGroupTask(cfg.NodeGroups, supportsManagedNodes, !awsNodeUsesIRSA, vpcImporter)
-        if nodeGroupTasks.Len() > 0 {
-            allNodeGroupTasks.Append(nodeGroupTasks)
-        }
-        managedTasks := m.stackManager.NewManagedNodeGroupTask(cfg.ManagedNodeGroups, !awsNodeUsesIRSA, vpcImporter)
-        if managedTasks.Len() > 0 {
-            allNodeGroupTasks.Append(managedTasks)
-        }
+    taskTree := &tasks.TaskTree{
+        Parallel: false,
+    }
 
-        taskTree.Append(allNodeGroupTasks)
-        logger.Info(taskTree.Describe())
-        errs := taskTree.DoAllSync()
-        if len(errs) > 0 {
-            logger.Info("%d error(s) occurred and nodegroups haven't been created properly, you may wish to check CloudFormation console", len(errs))
-            logger.Info("to cleanup resources, run 'eksctl delete nodegroup --region=%s --cluster=%s --name=' for each of the failed nodegroup", meta.Region, meta.Name)
-            for _, err := range errs {
-                if err != nil {
-                    logger.Critical("%s\n", err.Error())
-                }
-            }
-            return fmt.Errorf("failed to create nodegroups for cluster %q", meta.Name)
-        }
+    if supportsManagedNodes && isOwnedCluster {
+        taskTree.Append(m.stackManager.NewClusterCompatTask())
     }
 
-    if err := m.postNodeCreationTasks(m.clientSet, options); err != nil {
-        return err
+    awsNodeUsesIRSA, err := init.DoesAWSNodeUseIRSA(m.ctl.Provider, m.clientSet)
+    if err != nil {
+        return errors.Wrap(err, "couldn't check aws-node for annotation")
     }
 
-    if err := ctl.ValidateExistingNodeGroupsForCompatibility(cfg, m.stackManager); err != nil {
-        logger.Critical("failed checking nodegroups", err.Error())
+    if !awsNodeUsesIRSA && api.IsEnabled(cfg.IAM.WithOIDC) {
+        logger.Debug("cluster has withOIDC enabled but is not using IRSA for CNI, will add CNI policy to node role")
+    }
+
+    var vpcImporter vpc.Importer
+    if isOwnedCluster {
+        vpcImporter = vpc.NewStackConfigImporter(m.stackManager.MakeClusterStackName())
+    } else {
+        vpcImporter = vpc.NewSpecConfigImporter(*m.ctl.Status.ClusterInfo.Cluster.ResourcesVpcConfig.ClusterSecurityGroupId, cfg.VPC)
+    }
+
+    allNodeGroupTasks := &tasks.TaskTree{
+        Parallel: true,
+    }
+    nodeGroupTasks := m.stackManager.NewUnmanagedNodeGroupTask(cfg.NodeGroups, supportsManagedNodes, !awsNodeUsesIRSA, vpcImporter)
+    if nodeGroupTasks.Len() > 0 {
+        allNodeGroupTasks.Append(nodeGroupTasks)
+    }
+    managedTasks := m.stackManager.NewManagedNodeGroupTask(cfg.ManagedNodeGroups, !awsNodeUsesIRSA, vpcImporter)
+    if managedTasks.Len() > 0 {
+        allNodeGroupTasks.Append(managedTasks)
+    }
+
+    taskTree.Append(allNodeGroupTasks)
+    if err := m.init.DoAllNodegroupStackTasks(taskTree, meta.Region, meta.Name); err != nil {
+        return err
     }
 
     return nil
@@ -212,22 +211,14 @@ func (m *Manager) postNodeCreationTasks(clientSet kubernetes.Interface, options
     }
 
     if options.UpdateAuthConfigMap {
-        for _, ng := range m.cfg.NodeGroups {
-            // authorise nodes to join
-            if err := authconfigmap.AddNodeGroup(clientSet, ng); err != nil {
-                return err
-            }
-
-            // wait for nodes to join
-            if err := m.ctl.WaitForNodes(clientSet, ng); err != nil {
-                return err
-            }
+        if err := m.kubeProvider.UpdateAuthConfigMap(m.cfg.NodeGroups, clientSet); err != nil {
+            return err
         }
     }
     logger.Success("created %d nodegroup(s) in cluster %q", len(m.cfg.NodeGroups), m.cfg.Metadata.Name)
 
     for _, ng := range m.cfg.ManagedNodeGroups {
-        if err := m.ctl.WaitForNodes(clientSet, ng); err != nil {
+        if err := m.kubeProvider.WaitForNodes(clientSet, ng); err != nil {
             if m.cfg.PrivateCluster.Enabled {
                 logger.Info("error waiting for nodes to join the cluster; this command was likely run from outside the cluster's VPC as the API server is not reachable, nodegroup(s) should still be able to join the cluster, underlying error is: %v", err)
                 break
@@ -275,13 +266,14 @@ func checkVersion(ctl *eks.ClusterProvider, meta *api.ClusterMeta) error {
     return nil
 }
 
-func checkARMSupport(ctl *eks.ClusterProvider, clientSet kubernetes.Interface, cfg *api.ClusterConfig, skipOutdatedAddonsCheck bool) error {
-    rawClient, err := ctl.NewRawClient(cfg)
+func (m *Manager) checkARMSupport(ctl *eks.ClusterProvider, clientSet kubernetes.Interface, cfg *api.ClusterConfig, skipOutdatedAddonsCheck bool) error {
+    kubeProvider := m.kubeProvider
+    rawClient, err := kubeProvider.NewRawClient(cfg)
     if err != nil {
         return err
     }
 
-    kubernetesVersion, err := rawClient.ServerVersion()
+    kubernetesVersion, err := kubeProvider.ServerVersion(rawClient)
     if err != nil {
         return err
     }
diff --git a/pkg/actions/nodegroup/create_test.go b/pkg/actions/nodegroup/create_test.go
new file mode 100644
index 0000000000..3e4ae34f72
--- /dev/null
+++ b/pkg/actions/nodegroup/create_test.go
@@ -0,0 +1,290 @@
+package nodegroup_test
+
+import (
+    "fmt"
+    "strings"
+
+    . "github.com/onsi/ginkgo/extensions/table"
+    .
"github.com/onsi/gomega" + + "github.com/pkg/errors" + "github.com/weaveworks/eksctl/pkg/actions/nodegroup" + api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/cfn/manager" + utilFakes "github.com/weaveworks/eksctl/pkg/ctl/cmdutils/filter/fakes" + "github.com/weaveworks/eksctl/pkg/eks" + "github.com/weaveworks/eksctl/pkg/eks/fakes" + "github.com/weaveworks/eksctl/pkg/kubernetes" + "github.com/weaveworks/eksctl/pkg/testutils" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" +) + +type ngEntry struct { + version string + opts nodegroup.CreateOpts + mockCalls func(*fakes.FakeKubeProvider, *fakes.FakeNodeGroupInitialiser, *utilFakes.FakeNodegroupFilter) + expErr error +} + +var _ = DescribeTable("Create", func(t ngEntry) { + cfg := newClusterConfig() + cfg.Metadata.Version = t.version + + p := mockprovider.NewMockProvider() + ctl := &eks.ClusterProvider{ + Provider: p, + Status: &eks.ProviderStatus{ + ClusterInfo: &eks.ClusterInfo{ + Cluster: testutils.NewFakeCluster("my-cluster", ""), + }, + }, + } + m := nodegroup.New(cfg, ctl, nil) + + k := &fakes.FakeKubeProvider{} + m.MockKubeProvider(k) + + init := &fakes.FakeNodeGroupInitialiser{} + m.MockNodeGroupService(init) + + ngFilter := utilFakes.FakeNodegroupFilter{} + if t.mockCalls != nil { + t.mockCalls(k, init, &ngFilter) + } + + err := m.Create(t.opts, &ngFilter) + if err != nil { + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring(t.expErr.Error()))) + return + } + + Expect(err).NotTo(HaveOccurred()) +}, + Entry("fails when cluster version is not supported", ngEntry{ + version: "1.14", + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + }, + expErr: fmt.Errorf("invalid version, %s is no longer supported, supported values: auto, default, latest, %s\nsee also: https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html", "1.14", strings.Join(api.SupportedVersions(), ", ")), + }), + + Entry("fails when it does not support ARM", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(nil, fmt.Errorf("err")) + }, + expErr: fmt.Errorf("err"), + }), + + Entry("when cluster is unowned, fails to load VPC from config if config is not supplied", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(&manager.StackNotFoundErr{}) + }, + expErr: errors.Wrapf(errors.New("VPC configuration required for creating nodegroups on clusters not owned by eksctl: vpc.subnets, vpc.id, vpc.securityGroup"), "loading VPC spec for cluster %q", "my-cluster"), + }), + + Entry("fails when cluster does not support managed nodes", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(false, errors.New("err")) + }, + expErr: errors.New("err"), + }), + + Entry("fails to set instance types to instances matched by instance selector criteria", ngEntry{ + mockCalls: func(k 
*fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(errors.New("err")) + }, + expErr: errors.New("err"), + }), + + Entry("fails when cluster is not compatible with ng config", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(errors.New("err")) + }, + expErr: errors.Wrap(errors.New("err"), "cluster compatibility check failed"), + }), + + Entry("fails when it cannot validate legacy subnets for ng", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + init.ValidateLegacySubnetsForNodeGroupsReturns(errors.New("err")) + }, + expErr: errors.New("err"), + }), + + Entry("fails when existing local ng stacks in config file is not listed", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + f.SetOnlyLocalReturns(errors.New("err")) + }, + expErr: errors.New("err"), + }), + + Entry("fails to evaluate whether aws-node uses IRSA", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + f.SetOnlyLocalReturns(nil) + init.DoesAWSNodeUseIRSAReturns(true, errors.New("err")) + }, + expErr: errors.New("err"), + }), + + Entry("stack manager fails to do ng tasks", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + f.SetOnlyLocalReturns(nil) + init.DoesAWSNodeUseIRSAReturns(false, nil) + init.DoAllNodegroupStackTasksReturns(errors.New("err")) + }, + expErr: 
errors.New("err"), + }), + + Entry("fails to update auth configmap", ngEntry{ + opts: nodegroup.CreateOpts{ + DryRun: true, + UpdateAuthConfigMap: true, + }, + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + f.SetOnlyLocalReturns(nil) + init.DoesAWSNodeUseIRSAReturns(false, nil) + init.DoAllNodegroupStackTasksReturns(nil) + k.UpdateAuthConfigMapReturns(errors.New("err")) + }, + expErr: errors.New("err"), + }), + + Entry("fails to validate existing ng for compatibility", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + f.SetOnlyLocalReturns(nil) + init.DoesAWSNodeUseIRSAReturns(false, nil) + init.DoAllNodegroupStackTasksReturns(nil) + init.ValidateExistingNodeGroupsForCompatibilityReturns(errors.New("err")) + }, + expErr: errors.New("err"), + }), + + Entry("[happy path] creates nodegroup with no options", ngEntry{ + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + f.SetOnlyLocalReturns(nil) + init.DoesAWSNodeUseIRSAReturns(false, nil) + init.DoAllNodegroupStackTasksReturns(nil) + init.ValidateExistingNodeGroupsForCompatibilityReturns(nil) + }, + expErr: nil, + }), + + Entry("[happy path] creates nodegroup with all the options", ngEntry{ + opts: nodegroup.CreateOpts{ + DryRun: true, + UpdateAuthConfigMap: true, + InstallNeuronDevicePlugin: true, + InstallNvidiaDevicePlugin: true, + SkipOutdatedAddonsCheck: true, + ConfigFileProvided: true, + }, + mockCalls: func(k *fakes.FakeKubeProvider, init *fakes.FakeNodeGroupInitialiser, f *utilFakes.FakeNodegroupFilter) { + k.NewRawClientReturns(&kubernetes.RawClient{}, nil) + k.ServerVersionReturns("1.17", nil) + k.LoadClusterIntoSpecFromStackReturns(nil) + k.SupportsManagedNodesReturns(true, nil) + init.NewAWSSelectorSessionReturns(nil) + init.ExpandInstanceSelectorOptionsReturns(nil) + k.ValidateClusterForCompatibilityReturns(nil) + f.SetOnlyLocalReturns(nil) + init.DoesAWSNodeUseIRSAReturns(false, nil) + init.DoAllNodegroupStackTasksReturns(nil) + k.UpdateAuthConfigMapReturns(nil) + }, + expErr: nil, + }), +) + +func newClusterConfig() *api.ClusterConfig { + return &api.ClusterConfig{ + TypeMeta: api.ClusterConfigTypeMeta(), + Metadata: &api.ClusterMeta{ + Name: "my-cluster", + Version: api.DefaultVersion, + }, + Status: &api.ClusterStatus{ + Endpoint: "https://localhost/", + CertificateAuthorityData: []byte("dGVzdAo="), + }, + IAM: api.NewClusterIAM(), + VPC: api.NewClusterVPC(), + CloudWatch: 
&api.ClusterCloudWatch{
+            ClusterLogging: &api.ClusterCloudWatchLogging{},
+        },
+        PrivateCluster: &api.PrivateCluster{},
+        NodeGroups: []*api.NodeGroup{{
+            NodeGroupBase: &api.NodeGroupBase{
+                Name: "my-ng",
+            }},
+        },
+        ManagedNodeGroups: []*api.ManagedNodeGroup{{
+            NodeGroupBase: &api.NodeGroupBase{
+                Name: "my-ng",
+            }},
+        },
+    }
+}
diff --git a/pkg/actions/nodegroup/export_test.go b/pkg/actions/nodegroup/export_test.go
index 2e7a5c8982..486cfb69af 100644
--- a/pkg/actions/nodegroup/export_test.go
+++ b/pkg/actions/nodegroup/export_test.go
@@ -1,6 +1,9 @@
 package nodegroup
 
-import "github.com/weaveworks/eksctl/pkg/cfn/manager"
+import (
+    "github.com/weaveworks/eksctl/pkg/cfn/manager"
+    "github.com/weaveworks/eksctl/pkg/eks"
+)
 
 func (m *Manager) SetWaiter(wait WaitFunc) {
     m.wait = wait
@@ -9,3 +12,13 @@ func (m *Manager) SetStackManager(stackManager manager.StackManager) {
     m.stackManager = stackManager
 }
+
+// MockKubeProvider can be used for passing a mock of the kube provider.
+func (m *Manager) MockKubeProvider(k eks.KubeProvider) {
+    m.kubeProvider = k
+}
+
+// MockNodeGroupService can be used for passing a mock of the nodegroup initialiser.
+func (m *Manager) MockNodeGroupService(ngSvc eks.NodeGroupInitialiser) {
+    m.init = ngSvc
+}
diff --git a/pkg/actions/nodegroup/nodegroup.go b/pkg/actions/nodegroup/nodegroup.go
index de754b1b8c..a073b0c06b 100644
--- a/pkg/actions/nodegroup/nodegroup.go
+++ b/pkg/actions/nodegroup/nodegroup.go
@@ -18,6 +18,8 @@ type Manager struct {
     cfg          *api.ClusterConfig
     clientSet    *kubernetes.Clientset
     wait         WaitFunc
+    init         eks.NodeGroupInitialiser
+    kubeProvider eks.KubeProvider
 }
 
 type WaitFunc func(name, msg string, acceptors []request.WaiterAcceptor, newRequest func() *request.Request, waitTimeout time.Duration, troubleshoot func(string) error) error
@@ -29,6 +31,10 @@ func New(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, clientSet *kubernetes
         cfg:       cfg,
         clientSet: clientSet,
         wait:      waiters.Wait,
+        init: &eks.NodeGroupService{
+            Provider: ctl.Provider,
+        },
+        kubeProvider: ctl,
     }
 }
diff --git a/pkg/ctl/cmdutils/cluster.go b/pkg/ctl/cmdutils/cluster.go
index ed46067825..6b2674fdc3 100644
--- a/pkg/ctl/cmdutils/cluster.go
+++ b/pkg/ctl/cmdutils/cluster.go
@@ -7,7 +7,7 @@ import (
 )
 
 // ApplyFilter applies nodegroup filters and returns a log function
-func ApplyFilter(clusterConfig *api.ClusterConfig, ngFilter *filter.NodeGroupFilter) func() {
+func ApplyFilter(clusterConfig *api.ClusterConfig, ngFilter filter.NodegroupFilter) func() {
     var (
         filteredNodeGroups        []*api.NodeGroup
         filteredManagedNodeGroups []*api.ManagedNodeGroup
diff --git a/pkg/ctl/cmdutils/filter/fakes/fake_nodegroup_filter.go b/pkg/ctl/cmdutils/filter/fakes/fake_nodegroup_filter.go
new file mode 100644
index 0000000000..5f15235206
--- /dev/null
+++ b/pkg/ctl/cmdutils/filter/fakes/fake_nodegroup_filter.go
@@ -0,0 +1,230 @@
+// Code generated by counterfeiter. DO NOT EDIT.
+package fakes + +import ( + "sync" + + "github.com/aws/aws-sdk-go/service/eks/eksiface" + "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils/filter" +) + +type FakeNodegroupFilter struct { + LogInfoStub func(*v1alpha5.ClusterConfig) + logInfoMutex sync.RWMutex + logInfoArgsForCall []struct { + arg1 *v1alpha5.ClusterConfig + } + MatchStub func(string) bool + matchMutex sync.RWMutex + matchArgsForCall []struct { + arg1 string + } + matchReturns struct { + result1 bool + } + matchReturnsOnCall map[int]struct { + result1 bool + } + SetOnlyLocalStub func(eksiface.EKSAPI, filter.StackLister, *v1alpha5.ClusterConfig) error + setOnlyLocalMutex sync.RWMutex + setOnlyLocalArgsForCall []struct { + arg1 eksiface.EKSAPI + arg2 filter.StackLister + arg3 *v1alpha5.ClusterConfig + } + setOnlyLocalReturns struct { + result1 error + } + setOnlyLocalReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeNodegroupFilter) LogInfo(arg1 *v1alpha5.ClusterConfig) { + fake.logInfoMutex.Lock() + fake.logInfoArgsForCall = append(fake.logInfoArgsForCall, struct { + arg1 *v1alpha5.ClusterConfig + }{arg1}) + stub := fake.LogInfoStub + fake.recordInvocation("LogInfo", []interface{}{arg1}) + fake.logInfoMutex.Unlock() + if stub != nil { + fake.LogInfoStub(arg1) + } +} + +func (fake *FakeNodegroupFilter) LogInfoCallCount() int { + fake.logInfoMutex.RLock() + defer fake.logInfoMutex.RUnlock() + return len(fake.logInfoArgsForCall) +} + +func (fake *FakeNodegroupFilter) LogInfoCalls(stub func(*v1alpha5.ClusterConfig)) { + fake.logInfoMutex.Lock() + defer fake.logInfoMutex.Unlock() + fake.LogInfoStub = stub +} + +func (fake *FakeNodegroupFilter) LogInfoArgsForCall(i int) *v1alpha5.ClusterConfig { + fake.logInfoMutex.RLock() + defer fake.logInfoMutex.RUnlock() + argsForCall := fake.logInfoArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeNodegroupFilter) Match(arg1 string) bool { + fake.matchMutex.Lock() + ret, specificReturn := fake.matchReturnsOnCall[len(fake.matchArgsForCall)] + fake.matchArgsForCall = append(fake.matchArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.MatchStub + fakeReturns := fake.matchReturns + fake.recordInvocation("Match", []interface{}{arg1}) + fake.matchMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodegroupFilter) MatchCallCount() int { + fake.matchMutex.RLock() + defer fake.matchMutex.RUnlock() + return len(fake.matchArgsForCall) +} + +func (fake *FakeNodegroupFilter) MatchCalls(stub func(string) bool) { + fake.matchMutex.Lock() + defer fake.matchMutex.Unlock() + fake.MatchStub = stub +} + +func (fake *FakeNodegroupFilter) MatchArgsForCall(i int) string { + fake.matchMutex.RLock() + defer fake.matchMutex.RUnlock() + argsForCall := fake.matchArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeNodegroupFilter) MatchReturns(result1 bool) { + fake.matchMutex.Lock() + defer fake.matchMutex.Unlock() + fake.MatchStub = nil + fake.matchReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakeNodegroupFilter) MatchReturnsOnCall(i int, result1 bool) { + fake.matchMutex.Lock() + defer fake.matchMutex.Unlock() + fake.MatchStub = nil + if fake.matchReturnsOnCall == nil { + fake.matchReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.matchReturnsOnCall[i] = struct { + 
result1 bool + }{result1} +} + +func (fake *FakeNodegroupFilter) SetOnlyLocal(arg1 eksiface.EKSAPI, arg2 filter.StackLister, arg3 *v1alpha5.ClusterConfig) error { + fake.setOnlyLocalMutex.Lock() + ret, specificReturn := fake.setOnlyLocalReturnsOnCall[len(fake.setOnlyLocalArgsForCall)] + fake.setOnlyLocalArgsForCall = append(fake.setOnlyLocalArgsForCall, struct { + arg1 eksiface.EKSAPI + arg2 filter.StackLister + arg3 *v1alpha5.ClusterConfig + }{arg1, arg2, arg3}) + stub := fake.SetOnlyLocalStub + fakeReturns := fake.setOnlyLocalReturns + fake.recordInvocation("SetOnlyLocal", []interface{}{arg1, arg2, arg3}) + fake.setOnlyLocalMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodegroupFilter) SetOnlyLocalCallCount() int { + fake.setOnlyLocalMutex.RLock() + defer fake.setOnlyLocalMutex.RUnlock() + return len(fake.setOnlyLocalArgsForCall) +} + +func (fake *FakeNodegroupFilter) SetOnlyLocalCalls(stub func(eksiface.EKSAPI, filter.StackLister, *v1alpha5.ClusterConfig) error) { + fake.setOnlyLocalMutex.Lock() + defer fake.setOnlyLocalMutex.Unlock() + fake.SetOnlyLocalStub = stub +} + +func (fake *FakeNodegroupFilter) SetOnlyLocalArgsForCall(i int) (eksiface.EKSAPI, filter.StackLister, *v1alpha5.ClusterConfig) { + fake.setOnlyLocalMutex.RLock() + defer fake.setOnlyLocalMutex.RUnlock() + argsForCall := fake.setOnlyLocalArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeNodegroupFilter) SetOnlyLocalReturns(result1 error) { + fake.setOnlyLocalMutex.Lock() + defer fake.setOnlyLocalMutex.Unlock() + fake.SetOnlyLocalStub = nil + fake.setOnlyLocalReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeNodegroupFilter) SetOnlyLocalReturnsOnCall(i int, result1 error) { + fake.setOnlyLocalMutex.Lock() + defer fake.setOnlyLocalMutex.Unlock() + fake.SetOnlyLocalStub = nil + if fake.setOnlyLocalReturnsOnCall == nil { + fake.setOnlyLocalReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.setOnlyLocalReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeNodegroupFilter) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.logInfoMutex.RLock() + defer fake.logInfoMutex.RUnlock() + fake.matchMutex.RLock() + defer fake.matchMutex.RUnlock() + fake.setOnlyLocalMutex.RLock() + defer fake.setOnlyLocalMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeNodegroupFilter) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ filter.NodegroupFilter = new(FakeNodegroupFilter) diff --git a/pkg/ctl/cmdutils/filter/nodegroup_filter.go b/pkg/ctl/cmdutils/filter/nodegroup_filter.go index 68b8800456..92e0272f3b 100644 --- a/pkg/ctl/cmdutils/filter/nodegroup_filter.go +++ b/pkg/ctl/cmdutils/filter/nodegroup_filter.go @@ -14,6 +14,20 @@ import ( "github.com/weaveworks/eksctl/pkg/cfn/manager" ) +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate 
+//counterfeiter:generate . NodegroupFilter +// NodegroupFilter is an interface that holds filter configuration +type NodegroupFilter interface { + SetOnlyLocal(eksAPI eksiface.EKSAPI, lister StackLister, clusterConfig *api.ClusterConfig) error + Match(ngName string) bool + LogInfo(cfg *api.ClusterConfig) +} + +// StackLister lists nodegroup stacks +type StackLister interface { + ListNodeGroupStacks() ([]manager.NodeGroupStack, error) +} + // NodeGroupFilter holds filter configuration type NodeGroupFilter struct { delegate *Filter @@ -23,7 +37,7 @@ type NodeGroupFilter struct { remoteNodegroups sets.String } -// NewNodeGroupFilter create new NodeGroupFilter instance +// NewNodeGroupFilter creates a new NodeGroupFilter struct func NewNodeGroupFilter() *NodeGroupFilter { return &NodeGroupFilter{ delegate: &Filter{ @@ -59,16 +73,11 @@ func (f *NodeGroupFilter) AppendIncludeNames(names ...string) { f.delegate.AppendIncludeNames(names...) } -// A stackLister lists nodegroup stacks -type stackLister interface { - ListNodeGroupStacks() ([]manager.NodeGroupStack, error) -} - -// SetOnlyLocal uses stackLister to list existing nodegroup stacks and configures +// SetOnlyLocal uses StackLister to list existing nodegroup stacks and configures // the filter to only include the nodegroups that don't exist in the cluster already. // Note: they are present in the config file but not in the cluster. This is used by // the create nodegroup command -func (f *NodeGroupFilter) SetOnlyLocal(eksAPI eksiface.EKSAPI, lister stackLister, clusterConfig *api.ClusterConfig) error { +func (f *NodeGroupFilter) SetOnlyLocal(eksAPI eksiface.EKSAPI, lister StackLister, clusterConfig *api.ClusterConfig) error { f.onlyLocal = true err := f.loadLocalAndRemoteNodegroups(eksAPI, lister, clusterConfig) @@ -83,10 +92,10 @@ func (f *NodeGroupFilter) SetOnlyLocal(eksAPI eksiface.EKSAPI, lister stackListe return nil } -// SetOnlyRemote uses stackLister to list existing nodegroup stacks and configures +// SetOnlyRemote uses StackLister to list existing nodegroup stacks and configures // the filter to exclude nodegroups already defined in the config file. 
It will include the // nodegroups that exist in the cluster but not in the config -func (f *NodeGroupFilter) SetOnlyRemote(eksAPI eksiface.EKSAPI, lister stackLister, clusterConfig *api.ClusterConfig) error { +func (f *NodeGroupFilter) SetOnlyRemote(eksAPI eksiface.EKSAPI, lister StackLister, clusterConfig *api.ClusterConfig) error { f.onlyRemote = true err := f.loadLocalAndRemoteNodegroups(eksAPI, lister, clusterConfig) @@ -111,7 +120,7 @@ func (f *NodeGroupFilter) GetExcludeAll() bool { return f.delegate.ExcludeAll } -func (f *NodeGroupFilter) loadLocalAndRemoteNodegroups(eksAPI eksiface.EKSAPI, lister stackLister, clusterConfig *api.ClusterConfig) error { +func (f *NodeGroupFilter) loadLocalAndRemoteNodegroups(eksAPI eksiface.EKSAPI, lister StackLister, clusterConfig *api.ClusterConfig) error { nodeGroupsWithStacks, nodeGroupsWithoutStacks, err := f.findAllNodeGroups(eksAPI, lister, clusterConfig) if err != nil { return err @@ -157,7 +166,7 @@ func (f *NodeGroupFilter) loadLocalAndRemoteNodegroups(eksAPI eksiface.EKSAPI, l return nil } -func (f *NodeGroupFilter) findAllNodeGroups(eksAPI eksiface.EKSAPI, lister stackLister, clusterConfig *api.ClusterConfig) ([]manager.NodeGroupStack, []string, error) { +func (f *NodeGroupFilter) findAllNodeGroups(eksAPI eksiface.EKSAPI, lister StackLister, clusterConfig *api.ClusterConfig) ([]manager.NodeGroupStack, []string, error) { // Get remote nodegroups stacks nodeGroupsWithStacks, err := lister.ListNodeGroupStacks() if err != nil { diff --git a/pkg/ctl/create/nodegroup.go b/pkg/ctl/create/nodegroup.go index 7965f233bb..e873a3e047 100644 --- a/pkg/ctl/create/nodegroup.go +++ b/pkg/ctl/create/nodegroup.go @@ -60,7 +60,7 @@ func createNodeGroupCmd(cmd *cmdutils.Cmd) { DryRun: options.DryRun, SkipOutdatedAddonsCheck: options.SkipOutdatedAddonsCheck, ConfigFileProvided: cmd.ClusterConfigFile != "", - }, *ngFilter) + }, ngFilter) }) } diff --git a/pkg/eks/api.go b/pkg/eks/api.go index 2cd36dc3ee..09d2bca55e 100644 --- a/pkg/eks/api.go +++ b/pkg/eks/api.go @@ -43,6 +43,8 @@ import ( api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" "github.com/weaveworks/eksctl/pkg/az" "github.com/weaveworks/eksctl/pkg/cfn/manager" + "github.com/weaveworks/eksctl/pkg/kubernetes" + kubewrapper "github.com/weaveworks/eksctl/pkg/kubernetes" "github.com/weaveworks/eksctl/pkg/utils" "github.com/weaveworks/eksctl/pkg/version" ) @@ -55,6 +57,18 @@ type ClusterProvider struct { Status *ProviderStatus } +//counterfeiter:generate -o fakes/fake_kube_provider.go . 
KubeProvider +// KubeProvider is an interface with helper funcs for k8s and EKS that are part of ClusterProvider +type KubeProvider interface { + NewRawClient(spec *api.ClusterConfig) (*kubewrapper.RawClient, error) + ServerVersion(rawClient *kubernetes.RawClient) (string, error) + LoadClusterIntoSpecFromStack(spec *api.ClusterConfig, stackManager manager.StackManager) error + SupportsManagedNodes(clusterConfig *api.ClusterConfig) (bool, error) + ValidateClusterForCompatibility(cfg *api.ClusterConfig, stackManager manager.StackManager) error + UpdateAuthConfigMap(nodeGroups []*api.NodeGroup, clientSet kubernetes.Interface) error + WaitForNodes(clientSet kubernetes.Interface, ng KubeNodeGroup) error +} + // ProviderServices stores the used APIs type ProviderServices struct { spec *api.ProviderConfig diff --git a/pkg/eks/client.go b/pkg/eks/client.go index d54370ca45..0efb8664df 100644 --- a/pkg/eks/client.go +++ b/pkg/eks/client.go @@ -1,10 +1,17 @@ package eks import ( + "context" + "fmt" "strings" + "time" + "github.com/kris-nova/logger" "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -15,6 +22,7 @@ import ( "sigs.k8s.io/aws-iam-authenticator/pkg/token" api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/authconfigmap" kubewrapper "github.com/weaveworks/eksctl/pkg/kubernetes" "github.com/weaveworks/eksctl/pkg/utils/kubeconfig" ) @@ -115,3 +123,76 @@ func (c *ClusterProvider) NewRawClient(spec *api.ClusterConfig) (*kubewrapper.Ra return kubewrapper.NewRawClient(clientSet, client.rawConfig) } + +// ServerVersion will use discovery API to fetch version of Kubernetes control plane +func (c *ClusterProvider) ServerVersion(rawClient *kubewrapper.RawClient) (string, error) { + return rawClient.ServerVersion() +} + +// UpdateAuthConfigMap creates or adds a nodegroup IAM role in the auth ConfigMap for the given nodegroup. 
+func (c *ClusterProvider) UpdateAuthConfigMap(nodeGroups []*api.NodeGroup, clientSet kubernetes.Interface) error { + for _, ng := range nodeGroups { + // authorise nodes to join + if err := authconfigmap.AddNodeGroup(clientSet, ng); err != nil { + return err + } + + // wait for nodes to join + if err := c.WaitForNodes(clientSet, ng); err != nil { + return err + } + } + return nil +} + +// WaitForNodes waits till the nodes are ready +func (c *ClusterProvider) WaitForNodes(clientSet kubernetes.Interface, ng KubeNodeGroup) error { + minSize := ng.Size() + if minSize == 0 { + return nil + } + timer := time.After(c.Provider.WaitTimeout()) + timeout := false + readyNodes := sets.NewString() + watcher, err := clientSet.CoreV1().Nodes().Watch(context.TODO(), ng.ListOptions()) + if err != nil { + return errors.Wrap(err, "creating node watcher") + } + + counter, err := getNodes(clientSet, ng) + if err != nil { + return errors.Wrap(err, "listing nodes") + } + + logger.Info("waiting for at least %d node(s) to become ready in %q", minSize, ng.NameString()) + for !timeout && counter < minSize { + select { + case event := <-watcher.ResultChan(): + logger.Debug("event = %#v", event) + if event.Object != nil && event.Type != watch.Deleted { + if node, ok := event.Object.(*corev1.Node); ok { + if isNodeReady(node) { + readyNodes.Insert(node.Name) + counter = readyNodes.Len() + logger.Debug("node %q is ready in %q", node.Name, ng.NameString()) + } else { + logger.Debug("node %q seen in %q, but not ready yet", node.Name, ng.NameString()) + logger.Debug("node = %#v", *node) + } + } + } + case <-timer: + timeout = true + } + } + watcher.Stop() + if timeout { + return fmt.Errorf("timed out (after %s) waiting for at least %d nodes to join the cluster and become ready in %q", c.Provider.WaitTimeout(), minSize, ng.NameString()) + } + + if _, err = getNodes(clientSet, ng); err != nil { + return errors.Wrap(err, "re-listing nodes") + } + + return nil +} diff --git a/pkg/eks/compatibility.go b/pkg/eks/compatibility.go index 94136160a6..3ea7dada02 100644 --- a/pkg/eks/compatibility.go +++ b/pkg/eks/compatibility.go @@ -2,7 +2,6 @@ package eks import ( "fmt" - "strings" "github.com/kris-nova/logger" "github.com/pkg/errors" @@ -98,48 +97,3 @@ func isNodeGroupCompatible(name string, info manager.StackInfo) (bool, error) { return true, nil } - -// ValidateExistingNodeGroupsForCompatibility looks at each of the existing nodegroups and -// validates configuration, if it find issues it logs messages -func (c *ClusterProvider) ValidateExistingNodeGroupsForCompatibility(cfg *api.ClusterConfig, stackManager manager.StackManager) error { - infoByNodeGroup, err := stackManager.DescribeNodeGroupStacksAndResources() - if err != nil { - return errors.Wrap(err, "getting resources for all nodegroup stacks") - } - if len(infoByNodeGroup) == 0 { - return nil - } - - logger.Info("checking security group configuration for all nodegroups") - incompatibleNodeGroups := []string{} - for ng, info := range infoByNodeGroup { - if stackManager.StackStatusIsNotTransitional(info.Stack) { - isCompatible, err := isNodeGroupCompatible(ng, info) - if err != nil { - return err - } - if isCompatible { - logger.Debug("nodegroup %q is compatible", ng) - } else { - logger.Debug("nodegroup %q is incompatible", ng) - incompatibleNodeGroups = append(incompatibleNodeGroups, ng) - } - } - } - - numIncompatibleNodeGroups := len(incompatibleNodeGroups) - if numIncompatibleNodeGroups == 0 { - logger.Info("all nodegroups have up-to-date 
configuration") - return nil - } - - logger.Critical("found %d nodegroup(s) (%s) without shared security group, cluster networking maybe be broken", - numIncompatibleNodeGroups, strings.Join(incompatibleNodeGroups, ", ")) - logger.Critical("it's recommended to create new nodegroups, then delete old ones") - if cfg.VPC.SharedNodeSecurityGroup != "" { - logger.Critical("as a temporary fix, you can patch the configuration and add each of these nodegroup(s) to %q", - cfg.VPC.SharedNodeSecurityGroup) - } - - return nil -} diff --git a/pkg/eks/fakes/fake_kube_provider.go b/pkg/eks/fakes/fake_kube_provider.go new file mode 100644 index 0000000000..f661ec5b59 --- /dev/null +++ b/pkg/eks/fakes/fake_kube_provider.go @@ -0,0 +1,587 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package fakes + +import ( + "sync" + + "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/cfn/manager" + "github.com/weaveworks/eksctl/pkg/eks" + "github.com/weaveworks/eksctl/pkg/kubernetes" + kubernetesa "k8s.io/client-go/kubernetes" +) + +type FakeKubeProvider struct { + LoadClusterIntoSpecFromStackStub func(*v1alpha5.ClusterConfig, manager.StackManager) error + loadClusterIntoSpecFromStackMutex sync.RWMutex + loadClusterIntoSpecFromStackArgsForCall []struct { + arg1 *v1alpha5.ClusterConfig + arg2 manager.StackManager + } + loadClusterIntoSpecFromStackReturns struct { + result1 error + } + loadClusterIntoSpecFromStackReturnsOnCall map[int]struct { + result1 error + } + NewRawClientStub func(*v1alpha5.ClusterConfig) (*kubernetes.RawClient, error) + newRawClientMutex sync.RWMutex + newRawClientArgsForCall []struct { + arg1 *v1alpha5.ClusterConfig + } + newRawClientReturns struct { + result1 *kubernetes.RawClient + result2 error + } + newRawClientReturnsOnCall map[int]struct { + result1 *kubernetes.RawClient + result2 error + } + ServerVersionStub func(*kubernetes.RawClient) (string, error) + serverVersionMutex sync.RWMutex + serverVersionArgsForCall []struct { + arg1 *kubernetes.RawClient + } + serverVersionReturns struct { + result1 string + result2 error + } + serverVersionReturnsOnCall map[int]struct { + result1 string + result2 error + } + SupportsManagedNodesStub func(*v1alpha5.ClusterConfig) (bool, error) + supportsManagedNodesMutex sync.RWMutex + supportsManagedNodesArgsForCall []struct { + arg1 *v1alpha5.ClusterConfig + } + supportsManagedNodesReturns struct { + result1 bool + result2 error + } + supportsManagedNodesReturnsOnCall map[int]struct { + result1 bool + result2 error + } + UpdateAuthConfigMapStub func([]*v1alpha5.NodeGroup, kubernetesa.Interface) error + updateAuthConfigMapMutex sync.RWMutex + updateAuthConfigMapArgsForCall []struct { + arg1 []*v1alpha5.NodeGroup + arg2 kubernetesa.Interface + } + updateAuthConfigMapReturns struct { + result1 error + } + updateAuthConfigMapReturnsOnCall map[int]struct { + result1 error + } + ValidateClusterForCompatibilityStub func(*v1alpha5.ClusterConfig, manager.StackManager) error + validateClusterForCompatibilityMutex sync.RWMutex + validateClusterForCompatibilityArgsForCall []struct { + arg1 *v1alpha5.ClusterConfig + arg2 manager.StackManager + } + validateClusterForCompatibilityReturns struct { + result1 error + } + validateClusterForCompatibilityReturnsOnCall map[int]struct { + result1 error + } + WaitForNodesStub func(kubernetesa.Interface, eks.KubeNodeGroup) error + waitForNodesMutex sync.RWMutex + waitForNodesArgsForCall []struct { + arg1 kubernetesa.Interface + arg2 
eks.KubeNodeGroup + } + waitForNodesReturns struct { + result1 error + } + waitForNodesReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeKubeProvider) LoadClusterIntoSpecFromStack(arg1 *v1alpha5.ClusterConfig, arg2 manager.StackManager) error { + fake.loadClusterIntoSpecFromStackMutex.Lock() + ret, specificReturn := fake.loadClusterIntoSpecFromStackReturnsOnCall[len(fake.loadClusterIntoSpecFromStackArgsForCall)] + fake.loadClusterIntoSpecFromStackArgsForCall = append(fake.loadClusterIntoSpecFromStackArgsForCall, struct { + arg1 *v1alpha5.ClusterConfig + arg2 manager.StackManager + }{arg1, arg2}) + stub := fake.LoadClusterIntoSpecFromStackStub + fakeReturns := fake.loadClusterIntoSpecFromStackReturns + fake.recordInvocation("LoadClusterIntoSpecFromStack", []interface{}{arg1, arg2}) + fake.loadClusterIntoSpecFromStackMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeKubeProvider) LoadClusterIntoSpecFromStackCallCount() int { + fake.loadClusterIntoSpecFromStackMutex.RLock() + defer fake.loadClusterIntoSpecFromStackMutex.RUnlock() + return len(fake.loadClusterIntoSpecFromStackArgsForCall) +} + +func (fake *FakeKubeProvider) LoadClusterIntoSpecFromStackCalls(stub func(*v1alpha5.ClusterConfig, manager.StackManager) error) { + fake.loadClusterIntoSpecFromStackMutex.Lock() + defer fake.loadClusterIntoSpecFromStackMutex.Unlock() + fake.LoadClusterIntoSpecFromStackStub = stub +} + +func (fake *FakeKubeProvider) LoadClusterIntoSpecFromStackArgsForCall(i int) (*v1alpha5.ClusterConfig, manager.StackManager) { + fake.loadClusterIntoSpecFromStackMutex.RLock() + defer fake.loadClusterIntoSpecFromStackMutex.RUnlock() + argsForCall := fake.loadClusterIntoSpecFromStackArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeKubeProvider) LoadClusterIntoSpecFromStackReturns(result1 error) { + fake.loadClusterIntoSpecFromStackMutex.Lock() + defer fake.loadClusterIntoSpecFromStackMutex.Unlock() + fake.LoadClusterIntoSpecFromStackStub = nil + fake.loadClusterIntoSpecFromStackReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) LoadClusterIntoSpecFromStackReturnsOnCall(i int, result1 error) { + fake.loadClusterIntoSpecFromStackMutex.Lock() + defer fake.loadClusterIntoSpecFromStackMutex.Unlock() + fake.LoadClusterIntoSpecFromStackStub = nil + if fake.loadClusterIntoSpecFromStackReturnsOnCall == nil { + fake.loadClusterIntoSpecFromStackReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.loadClusterIntoSpecFromStackReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) NewRawClient(arg1 *v1alpha5.ClusterConfig) (*kubernetes.RawClient, error) { + fake.newRawClientMutex.Lock() + ret, specificReturn := fake.newRawClientReturnsOnCall[len(fake.newRawClientArgsForCall)] + fake.newRawClientArgsForCall = append(fake.newRawClientArgsForCall, struct { + arg1 *v1alpha5.ClusterConfig + }{arg1}) + stub := fake.NewRawClientStub + fakeReturns := fake.newRawClientReturns + fake.recordInvocation("NewRawClient", []interface{}{arg1}) + fake.newRawClientMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeKubeProvider) NewRawClientCallCount() int { + fake.newRawClientMutex.RLock() + defer 
fake.newRawClientMutex.RUnlock() + return len(fake.newRawClientArgsForCall) +} + +func (fake *FakeKubeProvider) NewRawClientCalls(stub func(*v1alpha5.ClusterConfig) (*kubernetes.RawClient, error)) { + fake.newRawClientMutex.Lock() + defer fake.newRawClientMutex.Unlock() + fake.NewRawClientStub = stub +} + +func (fake *FakeKubeProvider) NewRawClientArgsForCall(i int) *v1alpha5.ClusterConfig { + fake.newRawClientMutex.RLock() + defer fake.newRawClientMutex.RUnlock() + argsForCall := fake.newRawClientArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeKubeProvider) NewRawClientReturns(result1 *kubernetes.RawClient, result2 error) { + fake.newRawClientMutex.Lock() + defer fake.newRawClientMutex.Unlock() + fake.NewRawClientStub = nil + fake.newRawClientReturns = struct { + result1 *kubernetes.RawClient + result2 error + }{result1, result2} +} + +func (fake *FakeKubeProvider) NewRawClientReturnsOnCall(i int, result1 *kubernetes.RawClient, result2 error) { + fake.newRawClientMutex.Lock() + defer fake.newRawClientMutex.Unlock() + fake.NewRawClientStub = nil + if fake.newRawClientReturnsOnCall == nil { + fake.newRawClientReturnsOnCall = make(map[int]struct { + result1 *kubernetes.RawClient + result2 error + }) + } + fake.newRawClientReturnsOnCall[i] = struct { + result1 *kubernetes.RawClient + result2 error + }{result1, result2} +} + +func (fake *FakeKubeProvider) ServerVersion(arg1 *kubernetes.RawClient) (string, error) { + fake.serverVersionMutex.Lock() + ret, specificReturn := fake.serverVersionReturnsOnCall[len(fake.serverVersionArgsForCall)] + fake.serverVersionArgsForCall = append(fake.serverVersionArgsForCall, struct { + arg1 *kubernetes.RawClient + }{arg1}) + stub := fake.ServerVersionStub + fakeReturns := fake.serverVersionReturns + fake.recordInvocation("ServerVersion", []interface{}{arg1}) + fake.serverVersionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeKubeProvider) ServerVersionCallCount() int { + fake.serverVersionMutex.RLock() + defer fake.serverVersionMutex.RUnlock() + return len(fake.serverVersionArgsForCall) +} + +func (fake *FakeKubeProvider) ServerVersionCalls(stub func(*kubernetes.RawClient) (string, error)) { + fake.serverVersionMutex.Lock() + defer fake.serverVersionMutex.Unlock() + fake.ServerVersionStub = stub +} + +func (fake *FakeKubeProvider) ServerVersionArgsForCall(i int) *kubernetes.RawClient { + fake.serverVersionMutex.RLock() + defer fake.serverVersionMutex.RUnlock() + argsForCall := fake.serverVersionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeKubeProvider) ServerVersionReturns(result1 string, result2 error) { + fake.serverVersionMutex.Lock() + defer fake.serverVersionMutex.Unlock() + fake.ServerVersionStub = nil + fake.serverVersionReturns = struct { + result1 string + result2 error + }{result1, result2} +} + +func (fake *FakeKubeProvider) ServerVersionReturnsOnCall(i int, result1 string, result2 error) { + fake.serverVersionMutex.Lock() + defer fake.serverVersionMutex.Unlock() + fake.ServerVersionStub = nil + if fake.serverVersionReturnsOnCall == nil { + fake.serverVersionReturnsOnCall = make(map[int]struct { + result1 string + result2 error + }) + } + fake.serverVersionReturnsOnCall[i] = struct { + result1 string + result2 error + }{result1, result2} +} + +func (fake *FakeKubeProvider) SupportsManagedNodes(arg1 *v1alpha5.ClusterConfig) (bool, error) { + fake.supportsManagedNodesMutex.Lock() 
+ ret, specificReturn := fake.supportsManagedNodesReturnsOnCall[len(fake.supportsManagedNodesArgsForCall)] + fake.supportsManagedNodesArgsForCall = append(fake.supportsManagedNodesArgsForCall, struct { + arg1 *v1alpha5.ClusterConfig + }{arg1}) + stub := fake.SupportsManagedNodesStub + fakeReturns := fake.supportsManagedNodesReturns + fake.recordInvocation("SupportsManagedNodes", []interface{}{arg1}) + fake.supportsManagedNodesMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeKubeProvider) SupportsManagedNodesCallCount() int { + fake.supportsManagedNodesMutex.RLock() + defer fake.supportsManagedNodesMutex.RUnlock() + return len(fake.supportsManagedNodesArgsForCall) +} + +func (fake *FakeKubeProvider) SupportsManagedNodesCalls(stub func(*v1alpha5.ClusterConfig) (bool, error)) { + fake.supportsManagedNodesMutex.Lock() + defer fake.supportsManagedNodesMutex.Unlock() + fake.SupportsManagedNodesStub = stub +} + +func (fake *FakeKubeProvider) SupportsManagedNodesArgsForCall(i int) *v1alpha5.ClusterConfig { + fake.supportsManagedNodesMutex.RLock() + defer fake.supportsManagedNodesMutex.RUnlock() + argsForCall := fake.supportsManagedNodesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeKubeProvider) SupportsManagedNodesReturns(result1 bool, result2 error) { + fake.supportsManagedNodesMutex.Lock() + defer fake.supportsManagedNodesMutex.Unlock() + fake.SupportsManagedNodesStub = nil + fake.supportsManagedNodesReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeKubeProvider) SupportsManagedNodesReturnsOnCall(i int, result1 bool, result2 error) { + fake.supportsManagedNodesMutex.Lock() + defer fake.supportsManagedNodesMutex.Unlock() + fake.SupportsManagedNodesStub = nil + if fake.supportsManagedNodesReturnsOnCall == nil { + fake.supportsManagedNodesReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.supportsManagedNodesReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeKubeProvider) UpdateAuthConfigMap(arg1 []*v1alpha5.NodeGroup, arg2 kubernetesa.Interface) error { + var arg1Copy []*v1alpha5.NodeGroup + if arg1 != nil { + arg1Copy = make([]*v1alpha5.NodeGroup, len(arg1)) + copy(arg1Copy, arg1) + } + fake.updateAuthConfigMapMutex.Lock() + ret, specificReturn := fake.updateAuthConfigMapReturnsOnCall[len(fake.updateAuthConfigMapArgsForCall)] + fake.updateAuthConfigMapArgsForCall = append(fake.updateAuthConfigMapArgsForCall, struct { + arg1 []*v1alpha5.NodeGroup + arg2 kubernetesa.Interface + }{arg1Copy, arg2}) + stub := fake.UpdateAuthConfigMapStub + fakeReturns := fake.updateAuthConfigMapReturns + fake.recordInvocation("UpdateAuthConfigMap", []interface{}{arg1Copy, arg2}) + fake.updateAuthConfigMapMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeKubeProvider) UpdateAuthConfigMapCallCount() int { + fake.updateAuthConfigMapMutex.RLock() + defer fake.updateAuthConfigMapMutex.RUnlock() + return len(fake.updateAuthConfigMapArgsForCall) +} + +func (fake *FakeKubeProvider) UpdateAuthConfigMapCalls(stub func([]*v1alpha5.NodeGroup, kubernetesa.Interface) error) { + fake.updateAuthConfigMapMutex.Lock() + defer fake.updateAuthConfigMapMutex.Unlock() + fake.UpdateAuthConfigMapStub = stub +} + +func (fake *FakeKubeProvider) 
UpdateAuthConfigMapArgsForCall(i int) ([]*v1alpha5.NodeGroup, kubernetesa.Interface) { + fake.updateAuthConfigMapMutex.RLock() + defer fake.updateAuthConfigMapMutex.RUnlock() + argsForCall := fake.updateAuthConfigMapArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeKubeProvider) UpdateAuthConfigMapReturns(result1 error) { + fake.updateAuthConfigMapMutex.Lock() + defer fake.updateAuthConfigMapMutex.Unlock() + fake.UpdateAuthConfigMapStub = nil + fake.updateAuthConfigMapReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) UpdateAuthConfigMapReturnsOnCall(i int, result1 error) { + fake.updateAuthConfigMapMutex.Lock() + defer fake.updateAuthConfigMapMutex.Unlock() + fake.UpdateAuthConfigMapStub = nil + if fake.updateAuthConfigMapReturnsOnCall == nil { + fake.updateAuthConfigMapReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateAuthConfigMapReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) ValidateClusterForCompatibility(arg1 *v1alpha5.ClusterConfig, arg2 manager.StackManager) error { + fake.validateClusterForCompatibilityMutex.Lock() + ret, specificReturn := fake.validateClusterForCompatibilityReturnsOnCall[len(fake.validateClusterForCompatibilityArgsForCall)] + fake.validateClusterForCompatibilityArgsForCall = append(fake.validateClusterForCompatibilityArgsForCall, struct { + arg1 *v1alpha5.ClusterConfig + arg2 manager.StackManager + }{arg1, arg2}) + stub := fake.ValidateClusterForCompatibilityStub + fakeReturns := fake.validateClusterForCompatibilityReturns + fake.recordInvocation("ValidateClusterForCompatibility", []interface{}{arg1, arg2}) + fake.validateClusterForCompatibilityMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeKubeProvider) ValidateClusterForCompatibilityCallCount() int { + fake.validateClusterForCompatibilityMutex.RLock() + defer fake.validateClusterForCompatibilityMutex.RUnlock() + return len(fake.validateClusterForCompatibilityArgsForCall) +} + +func (fake *FakeKubeProvider) ValidateClusterForCompatibilityCalls(stub func(*v1alpha5.ClusterConfig, manager.StackManager) error) { + fake.validateClusterForCompatibilityMutex.Lock() + defer fake.validateClusterForCompatibilityMutex.Unlock() + fake.ValidateClusterForCompatibilityStub = stub +} + +func (fake *FakeKubeProvider) ValidateClusterForCompatibilityArgsForCall(i int) (*v1alpha5.ClusterConfig, manager.StackManager) { + fake.validateClusterForCompatibilityMutex.RLock() + defer fake.validateClusterForCompatibilityMutex.RUnlock() + argsForCall := fake.validateClusterForCompatibilityArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeKubeProvider) ValidateClusterForCompatibilityReturns(result1 error) { + fake.validateClusterForCompatibilityMutex.Lock() + defer fake.validateClusterForCompatibilityMutex.Unlock() + fake.ValidateClusterForCompatibilityStub = nil + fake.validateClusterForCompatibilityReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) ValidateClusterForCompatibilityReturnsOnCall(i int, result1 error) { + fake.validateClusterForCompatibilityMutex.Lock() + defer fake.validateClusterForCompatibilityMutex.Unlock() + fake.ValidateClusterForCompatibilityStub = nil + if fake.validateClusterForCompatibilityReturnsOnCall == nil { + fake.validateClusterForCompatibilityReturnsOnCall = make(map[int]struct { + result1 error + }) 
+ } + fake.validateClusterForCompatibilityReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) WaitForNodes(arg1 kubernetesa.Interface, arg2 eks.KubeNodeGroup) error { + fake.waitForNodesMutex.Lock() + ret, specificReturn := fake.waitForNodesReturnsOnCall[len(fake.waitForNodesArgsForCall)] + fake.waitForNodesArgsForCall = append(fake.waitForNodesArgsForCall, struct { + arg1 kubernetesa.Interface + arg2 eks.KubeNodeGroup + }{arg1, arg2}) + stub := fake.WaitForNodesStub + fakeReturns := fake.waitForNodesReturns + fake.recordInvocation("WaitForNodes", []interface{}{arg1, arg2}) + fake.waitForNodesMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeKubeProvider) WaitForNodesCallCount() int { + fake.waitForNodesMutex.RLock() + defer fake.waitForNodesMutex.RUnlock() + return len(fake.waitForNodesArgsForCall) +} + +func (fake *FakeKubeProvider) WaitForNodesCalls(stub func(kubernetesa.Interface, eks.KubeNodeGroup) error) { + fake.waitForNodesMutex.Lock() + defer fake.waitForNodesMutex.Unlock() + fake.WaitForNodesStub = stub +} + +func (fake *FakeKubeProvider) WaitForNodesArgsForCall(i int) (kubernetesa.Interface, eks.KubeNodeGroup) { + fake.waitForNodesMutex.RLock() + defer fake.waitForNodesMutex.RUnlock() + argsForCall := fake.waitForNodesArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeKubeProvider) WaitForNodesReturns(result1 error) { + fake.waitForNodesMutex.Lock() + defer fake.waitForNodesMutex.Unlock() + fake.WaitForNodesStub = nil + fake.waitForNodesReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) WaitForNodesReturnsOnCall(i int, result1 error) { + fake.waitForNodesMutex.Lock() + defer fake.waitForNodesMutex.Unlock() + fake.WaitForNodesStub = nil + if fake.waitForNodesReturnsOnCall == nil { + fake.waitForNodesReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.waitForNodesReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeKubeProvider) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.loadClusterIntoSpecFromStackMutex.RLock() + defer fake.loadClusterIntoSpecFromStackMutex.RUnlock() + fake.newRawClientMutex.RLock() + defer fake.newRawClientMutex.RUnlock() + fake.serverVersionMutex.RLock() + defer fake.serverVersionMutex.RUnlock() + fake.supportsManagedNodesMutex.RLock() + defer fake.supportsManagedNodesMutex.RUnlock() + fake.updateAuthConfigMapMutex.RLock() + defer fake.updateAuthConfigMapMutex.RUnlock() + fake.validateClusterForCompatibilityMutex.RLock() + defer fake.validateClusterForCompatibilityMutex.RUnlock() + fake.waitForNodesMutex.RLock() + defer fake.waitForNodesMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeKubeProvider) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ eks.KubeProvider = new(FakeKubeProvider) diff --git a/pkg/eks/fakes/fake_nodegroup_initialiser.go 
b/pkg/eks/fakes/fake_nodegroup_initialiser.go new file mode 100644 index 0000000000..812ead3048 --- /dev/null +++ b/pkg/eks/fakes/fake_nodegroup_initialiser.go @@ -0,0 +1,594 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package fakes + +import ( + "sync" + + "github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector" + "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/cfn/manager" + "github.com/weaveworks/eksctl/pkg/eks" + "github.com/weaveworks/eksctl/pkg/utils/tasks" + "k8s.io/client-go/kubernetes" +) + +type FakeNodeGroupInitialiser struct { + DoAllNodegroupStackTasksStub func(*tasks.TaskTree, string, string) error + doAllNodegroupStackTasksMutex sync.RWMutex + doAllNodegroupStackTasksArgsForCall []struct { + arg1 *tasks.TaskTree + arg2 string + arg3 string + } + doAllNodegroupStackTasksReturns struct { + result1 error + } + doAllNodegroupStackTasksReturnsOnCall map[int]struct { + result1 error + } + DoesAWSNodeUseIRSAStub func(v1alpha5.ClusterProvider, kubernetes.Interface) (bool, error) + doesAWSNodeUseIRSAMutex sync.RWMutex + doesAWSNodeUseIRSAArgsForCall []struct { + arg1 v1alpha5.ClusterProvider + arg2 kubernetes.Interface + } + doesAWSNodeUseIRSAReturns struct { + result1 bool + result2 error + } + doesAWSNodeUseIRSAReturnsOnCall map[int]struct { + result1 bool + result2 error + } + ExpandInstanceSelectorOptionsStub func([]v1alpha5.NodePool, []string) error + expandInstanceSelectorOptionsMutex sync.RWMutex + expandInstanceSelectorOptionsArgsForCall []struct { + arg1 []v1alpha5.NodePool + arg2 []string + } + expandInstanceSelectorOptionsReturns struct { + result1 error + } + expandInstanceSelectorOptionsReturnsOnCall map[int]struct { + result1 error + } + NewAWSSelectorSessionStub func(v1alpha5.ClusterProvider) *selector.Selector + newAWSSelectorSessionMutex sync.RWMutex + newAWSSelectorSessionArgsForCall []struct { + arg1 v1alpha5.ClusterProvider + } + newAWSSelectorSessionReturns struct { + result1 *selector.Selector + } + newAWSSelectorSessionReturnsOnCall map[int]struct { + result1 *selector.Selector + } + NormalizeStub func([]v1alpha5.NodePool, *v1alpha5.ClusterMeta) error + normalizeMutex sync.RWMutex + normalizeArgsForCall []struct { + arg1 []v1alpha5.NodePool + arg2 *v1alpha5.ClusterMeta + } + normalizeReturns struct { + result1 error + } + normalizeReturnsOnCall map[int]struct { + result1 error + } + ValidateExistingNodeGroupsForCompatibilityStub func(*v1alpha5.ClusterConfig, manager.StackManager) error + validateExistingNodeGroupsForCompatibilityMutex sync.RWMutex + validateExistingNodeGroupsForCompatibilityArgsForCall []struct { + arg1 *v1alpha5.ClusterConfig + arg2 manager.StackManager + } + validateExistingNodeGroupsForCompatibilityReturns struct { + result1 error + } + validateExistingNodeGroupsForCompatibilityReturnsOnCall map[int]struct { + result1 error + } + ValidateLegacySubnetsForNodeGroupsStub func(*v1alpha5.ClusterConfig, v1alpha5.ClusterProvider) error + validateLegacySubnetsForNodeGroupsMutex sync.RWMutex + validateLegacySubnetsForNodeGroupsArgsForCall []struct { + arg1 *v1alpha5.ClusterConfig + arg2 v1alpha5.ClusterProvider + } + validateLegacySubnetsForNodeGroupsReturns struct { + result1 error + } + validateLegacySubnetsForNodeGroupsReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeNodeGroupInitialiser) DoAllNodegroupStackTasks(arg1 *tasks.TaskTree, arg2 string, arg3 
string) error { + fake.doAllNodegroupStackTasksMutex.Lock() + ret, specificReturn := fake.doAllNodegroupStackTasksReturnsOnCall[len(fake.doAllNodegroupStackTasksArgsForCall)] + fake.doAllNodegroupStackTasksArgsForCall = append(fake.doAllNodegroupStackTasksArgsForCall, struct { + arg1 *tasks.TaskTree + arg2 string + arg3 string + }{arg1, arg2, arg3}) + stub := fake.DoAllNodegroupStackTasksStub + fakeReturns := fake.doAllNodegroupStackTasksReturns + fake.recordInvocation("DoAllNodegroupStackTasks", []interface{}{arg1, arg2, arg3}) + fake.doAllNodegroupStackTasksMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodeGroupInitialiser) DoAllNodegroupStackTasksCallCount() int { + fake.doAllNodegroupStackTasksMutex.RLock() + defer fake.doAllNodegroupStackTasksMutex.RUnlock() + return len(fake.doAllNodegroupStackTasksArgsForCall) +} + +func (fake *FakeNodeGroupInitialiser) DoAllNodegroupStackTasksCalls(stub func(*tasks.TaskTree, string, string) error) { + fake.doAllNodegroupStackTasksMutex.Lock() + defer fake.doAllNodegroupStackTasksMutex.Unlock() + fake.DoAllNodegroupStackTasksStub = stub +} + +func (fake *FakeNodeGroupInitialiser) DoAllNodegroupStackTasksArgsForCall(i int) (*tasks.TaskTree, string, string) { + fake.doAllNodegroupStackTasksMutex.RLock() + defer fake.doAllNodegroupStackTasksMutex.RUnlock() + argsForCall := fake.doAllNodegroupStackTasksArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeNodeGroupInitialiser) DoAllNodegroupStackTasksReturns(result1 error) { + fake.doAllNodegroupStackTasksMutex.Lock() + defer fake.doAllNodegroupStackTasksMutex.Unlock() + fake.DoAllNodegroupStackTasksStub = nil + fake.doAllNodegroupStackTasksReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) DoAllNodegroupStackTasksReturnsOnCall(i int, result1 error) { + fake.doAllNodegroupStackTasksMutex.Lock() + defer fake.doAllNodegroupStackTasksMutex.Unlock() + fake.DoAllNodegroupStackTasksStub = nil + if fake.doAllNodegroupStackTasksReturnsOnCall == nil { + fake.doAllNodegroupStackTasksReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.doAllNodegroupStackTasksReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) DoesAWSNodeUseIRSA(arg1 v1alpha5.ClusterProvider, arg2 kubernetes.Interface) (bool, error) { + fake.doesAWSNodeUseIRSAMutex.Lock() + ret, specificReturn := fake.doesAWSNodeUseIRSAReturnsOnCall[len(fake.doesAWSNodeUseIRSAArgsForCall)] + fake.doesAWSNodeUseIRSAArgsForCall = append(fake.doesAWSNodeUseIRSAArgsForCall, struct { + arg1 v1alpha5.ClusterProvider + arg2 kubernetes.Interface + }{arg1, arg2}) + stub := fake.DoesAWSNodeUseIRSAStub + fakeReturns := fake.doesAWSNodeUseIRSAReturns + fake.recordInvocation("DoesAWSNodeUseIRSA", []interface{}{arg1, arg2}) + fake.doesAWSNodeUseIRSAMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeNodeGroupInitialiser) DoesAWSNodeUseIRSACallCount() int { + fake.doesAWSNodeUseIRSAMutex.RLock() + defer fake.doesAWSNodeUseIRSAMutex.RUnlock() + return len(fake.doesAWSNodeUseIRSAArgsForCall) +} + +func (fake *FakeNodeGroupInitialiser) DoesAWSNodeUseIRSACalls(stub func(v1alpha5.ClusterProvider, kubernetes.Interface) (bool, error)) { + fake.doesAWSNodeUseIRSAMutex.Lock() + 
defer fake.doesAWSNodeUseIRSAMutex.Unlock() + fake.DoesAWSNodeUseIRSAStub = stub +} + +func (fake *FakeNodeGroupInitialiser) DoesAWSNodeUseIRSAArgsForCall(i int) (v1alpha5.ClusterProvider, kubernetes.Interface) { + fake.doesAWSNodeUseIRSAMutex.RLock() + defer fake.doesAWSNodeUseIRSAMutex.RUnlock() + argsForCall := fake.doesAWSNodeUseIRSAArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNodeGroupInitialiser) DoesAWSNodeUseIRSAReturns(result1 bool, result2 error) { + fake.doesAWSNodeUseIRSAMutex.Lock() + defer fake.doesAWSNodeUseIRSAMutex.Unlock() + fake.DoesAWSNodeUseIRSAStub = nil + fake.doesAWSNodeUseIRSAReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeNodeGroupInitialiser) DoesAWSNodeUseIRSAReturnsOnCall(i int, result1 bool, result2 error) { + fake.doesAWSNodeUseIRSAMutex.Lock() + defer fake.doesAWSNodeUseIRSAMutex.Unlock() + fake.DoesAWSNodeUseIRSAStub = nil + if fake.doesAWSNodeUseIRSAReturnsOnCall == nil { + fake.doesAWSNodeUseIRSAReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.doesAWSNodeUseIRSAReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeNodeGroupInitialiser) ExpandInstanceSelectorOptions(arg1 []v1alpha5.NodePool, arg2 []string) error { + var arg1Copy []v1alpha5.NodePool + if arg1 != nil { + arg1Copy = make([]v1alpha5.NodePool, len(arg1)) + copy(arg1Copy, arg1) + } + var arg2Copy []string + if arg2 != nil { + arg2Copy = make([]string, len(arg2)) + copy(arg2Copy, arg2) + } + fake.expandInstanceSelectorOptionsMutex.Lock() + ret, specificReturn := fake.expandInstanceSelectorOptionsReturnsOnCall[len(fake.expandInstanceSelectorOptionsArgsForCall)] + fake.expandInstanceSelectorOptionsArgsForCall = append(fake.expandInstanceSelectorOptionsArgsForCall, struct { + arg1 []v1alpha5.NodePool + arg2 []string + }{arg1Copy, arg2Copy}) + stub := fake.ExpandInstanceSelectorOptionsStub + fakeReturns := fake.expandInstanceSelectorOptionsReturns + fake.recordInvocation("ExpandInstanceSelectorOptions", []interface{}{arg1Copy, arg2Copy}) + fake.expandInstanceSelectorOptionsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodeGroupInitialiser) ExpandInstanceSelectorOptionsCallCount() int { + fake.expandInstanceSelectorOptionsMutex.RLock() + defer fake.expandInstanceSelectorOptionsMutex.RUnlock() + return len(fake.expandInstanceSelectorOptionsArgsForCall) +} + +func (fake *FakeNodeGroupInitialiser) ExpandInstanceSelectorOptionsCalls(stub func([]v1alpha5.NodePool, []string) error) { + fake.expandInstanceSelectorOptionsMutex.Lock() + defer fake.expandInstanceSelectorOptionsMutex.Unlock() + fake.ExpandInstanceSelectorOptionsStub = stub +} + +func (fake *FakeNodeGroupInitialiser) ExpandInstanceSelectorOptionsArgsForCall(i int) ([]v1alpha5.NodePool, []string) { + fake.expandInstanceSelectorOptionsMutex.RLock() + defer fake.expandInstanceSelectorOptionsMutex.RUnlock() + argsForCall := fake.expandInstanceSelectorOptionsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNodeGroupInitialiser) ExpandInstanceSelectorOptionsReturns(result1 error) { + fake.expandInstanceSelectorOptionsMutex.Lock() + defer fake.expandInstanceSelectorOptionsMutex.Unlock() + fake.ExpandInstanceSelectorOptionsStub = nil + fake.expandInstanceSelectorOptionsReturns = struct { + result1 error + }{result1} +} + +func (fake 
*FakeNodeGroupInitialiser) ExpandInstanceSelectorOptionsReturnsOnCall(i int, result1 error) { + fake.expandInstanceSelectorOptionsMutex.Lock() + defer fake.expandInstanceSelectorOptionsMutex.Unlock() + fake.ExpandInstanceSelectorOptionsStub = nil + if fake.expandInstanceSelectorOptionsReturnsOnCall == nil { + fake.expandInstanceSelectorOptionsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.expandInstanceSelectorOptionsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) NewAWSSelectorSession(arg1 v1alpha5.ClusterProvider) *selector.Selector { + fake.newAWSSelectorSessionMutex.Lock() + ret, specificReturn := fake.newAWSSelectorSessionReturnsOnCall[len(fake.newAWSSelectorSessionArgsForCall)] + fake.newAWSSelectorSessionArgsForCall = append(fake.newAWSSelectorSessionArgsForCall, struct { + arg1 v1alpha5.ClusterProvider + }{arg1}) + stub := fake.NewAWSSelectorSessionStub + fakeReturns := fake.newAWSSelectorSessionReturns + fake.recordInvocation("NewAWSSelectorSession", []interface{}{arg1}) + fake.newAWSSelectorSessionMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodeGroupInitialiser) NewAWSSelectorSessionCallCount() int { + fake.newAWSSelectorSessionMutex.RLock() + defer fake.newAWSSelectorSessionMutex.RUnlock() + return len(fake.newAWSSelectorSessionArgsForCall) +} + +func (fake *FakeNodeGroupInitialiser) NewAWSSelectorSessionCalls(stub func(v1alpha5.ClusterProvider) *selector.Selector) { + fake.newAWSSelectorSessionMutex.Lock() + defer fake.newAWSSelectorSessionMutex.Unlock() + fake.NewAWSSelectorSessionStub = stub +} + +func (fake *FakeNodeGroupInitialiser) NewAWSSelectorSessionArgsForCall(i int) v1alpha5.ClusterProvider { + fake.newAWSSelectorSessionMutex.RLock() + defer fake.newAWSSelectorSessionMutex.RUnlock() + argsForCall := fake.newAWSSelectorSessionArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeNodeGroupInitialiser) NewAWSSelectorSessionReturns(result1 *selector.Selector) { + fake.newAWSSelectorSessionMutex.Lock() + defer fake.newAWSSelectorSessionMutex.Unlock() + fake.NewAWSSelectorSessionStub = nil + fake.newAWSSelectorSessionReturns = struct { + result1 *selector.Selector + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) NewAWSSelectorSessionReturnsOnCall(i int, result1 *selector.Selector) { + fake.newAWSSelectorSessionMutex.Lock() + defer fake.newAWSSelectorSessionMutex.Unlock() + fake.NewAWSSelectorSessionStub = nil + if fake.newAWSSelectorSessionReturnsOnCall == nil { + fake.newAWSSelectorSessionReturnsOnCall = make(map[int]struct { + result1 *selector.Selector + }) + } + fake.newAWSSelectorSessionReturnsOnCall[i] = struct { + result1 *selector.Selector + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) Normalize(arg1 []v1alpha5.NodePool, arg2 *v1alpha5.ClusterMeta) error { + var arg1Copy []v1alpha5.NodePool + if arg1 != nil { + arg1Copy = make([]v1alpha5.NodePool, len(arg1)) + copy(arg1Copy, arg1) + } + fake.normalizeMutex.Lock() + ret, specificReturn := fake.normalizeReturnsOnCall[len(fake.normalizeArgsForCall)] + fake.normalizeArgsForCall = append(fake.normalizeArgsForCall, struct { + arg1 []v1alpha5.NodePool + arg2 *v1alpha5.ClusterMeta + }{arg1Copy, arg2}) + stub := fake.NormalizeStub + fakeReturns := fake.normalizeReturns + fake.recordInvocation("Normalize", []interface{}{arg1Copy, arg2}) + fake.normalizeMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } 
+ if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodeGroupInitialiser) NormalizeCallCount() int { + fake.normalizeMutex.RLock() + defer fake.normalizeMutex.RUnlock() + return len(fake.normalizeArgsForCall) +} + +func (fake *FakeNodeGroupInitialiser) NormalizeCalls(stub func([]v1alpha5.NodePool, *v1alpha5.ClusterMeta) error) { + fake.normalizeMutex.Lock() + defer fake.normalizeMutex.Unlock() + fake.NormalizeStub = stub +} + +func (fake *FakeNodeGroupInitialiser) NormalizeArgsForCall(i int) ([]v1alpha5.NodePool, *v1alpha5.ClusterMeta) { + fake.normalizeMutex.RLock() + defer fake.normalizeMutex.RUnlock() + argsForCall := fake.normalizeArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNodeGroupInitialiser) NormalizeReturns(result1 error) { + fake.normalizeMutex.Lock() + defer fake.normalizeMutex.Unlock() + fake.NormalizeStub = nil + fake.normalizeReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) NormalizeReturnsOnCall(i int, result1 error) { + fake.normalizeMutex.Lock() + defer fake.normalizeMutex.Unlock() + fake.NormalizeStub = nil + if fake.normalizeReturnsOnCall == nil { + fake.normalizeReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.normalizeReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) ValidateExistingNodeGroupsForCompatibility(arg1 *v1alpha5.ClusterConfig, arg2 manager.StackManager) error { + fake.validateExistingNodeGroupsForCompatibilityMutex.Lock() + ret, specificReturn := fake.validateExistingNodeGroupsForCompatibilityReturnsOnCall[len(fake.validateExistingNodeGroupsForCompatibilityArgsForCall)] + fake.validateExistingNodeGroupsForCompatibilityArgsForCall = append(fake.validateExistingNodeGroupsForCompatibilityArgsForCall, struct { + arg1 *v1alpha5.ClusterConfig + arg2 manager.StackManager + }{arg1, arg2}) + stub := fake.ValidateExistingNodeGroupsForCompatibilityStub + fakeReturns := fake.validateExistingNodeGroupsForCompatibilityReturns + fake.recordInvocation("ValidateExistingNodeGroupsForCompatibility", []interface{}{arg1, arg2}) + fake.validateExistingNodeGroupsForCompatibilityMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodeGroupInitialiser) ValidateExistingNodeGroupsForCompatibilityCallCount() int { + fake.validateExistingNodeGroupsForCompatibilityMutex.RLock() + defer fake.validateExistingNodeGroupsForCompatibilityMutex.RUnlock() + return len(fake.validateExistingNodeGroupsForCompatibilityArgsForCall) +} + +func (fake *FakeNodeGroupInitialiser) ValidateExistingNodeGroupsForCompatibilityCalls(stub func(*v1alpha5.ClusterConfig, manager.StackManager) error) { + fake.validateExistingNodeGroupsForCompatibilityMutex.Lock() + defer fake.validateExistingNodeGroupsForCompatibilityMutex.Unlock() + fake.ValidateExistingNodeGroupsForCompatibilityStub = stub +} + +func (fake *FakeNodeGroupInitialiser) ValidateExistingNodeGroupsForCompatibilityArgsForCall(i int) (*v1alpha5.ClusterConfig, manager.StackManager) { + fake.validateExistingNodeGroupsForCompatibilityMutex.RLock() + defer fake.validateExistingNodeGroupsForCompatibilityMutex.RUnlock() + argsForCall := fake.validateExistingNodeGroupsForCompatibilityArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNodeGroupInitialiser) ValidateExistingNodeGroupsForCompatibilityReturns(result1 error) { + 
fake.validateExistingNodeGroupsForCompatibilityMutex.Lock() + defer fake.validateExistingNodeGroupsForCompatibilityMutex.Unlock() + fake.ValidateExistingNodeGroupsForCompatibilityStub = nil + fake.validateExistingNodeGroupsForCompatibilityReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) ValidateExistingNodeGroupsForCompatibilityReturnsOnCall(i int, result1 error) { + fake.validateExistingNodeGroupsForCompatibilityMutex.Lock() + defer fake.validateExistingNodeGroupsForCompatibilityMutex.Unlock() + fake.ValidateExistingNodeGroupsForCompatibilityStub = nil + if fake.validateExistingNodeGroupsForCompatibilityReturnsOnCall == nil { + fake.validateExistingNodeGroupsForCompatibilityReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.validateExistingNodeGroupsForCompatibilityReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) ValidateLegacySubnetsForNodeGroups(arg1 *v1alpha5.ClusterConfig, arg2 v1alpha5.ClusterProvider) error { + fake.validateLegacySubnetsForNodeGroupsMutex.Lock() + ret, specificReturn := fake.validateLegacySubnetsForNodeGroupsReturnsOnCall[len(fake.validateLegacySubnetsForNodeGroupsArgsForCall)] + fake.validateLegacySubnetsForNodeGroupsArgsForCall = append(fake.validateLegacySubnetsForNodeGroupsArgsForCall, struct { + arg1 *v1alpha5.ClusterConfig + arg2 v1alpha5.ClusterProvider + }{arg1, arg2}) + stub := fake.ValidateLegacySubnetsForNodeGroupsStub + fakeReturns := fake.validateLegacySubnetsForNodeGroupsReturns + fake.recordInvocation("ValidateLegacySubnetsForNodeGroups", []interface{}{arg1, arg2}) + fake.validateLegacySubnetsForNodeGroupsMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeNodeGroupInitialiser) ValidateLegacySubnetsForNodeGroupsCallCount() int { + fake.validateLegacySubnetsForNodeGroupsMutex.RLock() + defer fake.validateLegacySubnetsForNodeGroupsMutex.RUnlock() + return len(fake.validateLegacySubnetsForNodeGroupsArgsForCall) +} + +func (fake *FakeNodeGroupInitialiser) ValidateLegacySubnetsForNodeGroupsCalls(stub func(*v1alpha5.ClusterConfig, v1alpha5.ClusterProvider) error) { + fake.validateLegacySubnetsForNodeGroupsMutex.Lock() + defer fake.validateLegacySubnetsForNodeGroupsMutex.Unlock() + fake.ValidateLegacySubnetsForNodeGroupsStub = stub +} + +func (fake *FakeNodeGroupInitialiser) ValidateLegacySubnetsForNodeGroupsArgsForCall(i int) (*v1alpha5.ClusterConfig, v1alpha5.ClusterProvider) { + fake.validateLegacySubnetsForNodeGroupsMutex.RLock() + defer fake.validateLegacySubnetsForNodeGroupsMutex.RUnlock() + argsForCall := fake.validateLegacySubnetsForNodeGroupsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeNodeGroupInitialiser) ValidateLegacySubnetsForNodeGroupsReturns(result1 error) { + fake.validateLegacySubnetsForNodeGroupsMutex.Lock() + defer fake.validateLegacySubnetsForNodeGroupsMutex.Unlock() + fake.ValidateLegacySubnetsForNodeGroupsStub = nil + fake.validateLegacySubnetsForNodeGroupsReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) ValidateLegacySubnetsForNodeGroupsReturnsOnCall(i int, result1 error) { + fake.validateLegacySubnetsForNodeGroupsMutex.Lock() + defer fake.validateLegacySubnetsForNodeGroupsMutex.Unlock() + fake.ValidateLegacySubnetsForNodeGroupsStub = nil + if fake.validateLegacySubnetsForNodeGroupsReturnsOnCall == nil { + 
fake.validateLegacySubnetsForNodeGroupsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.validateLegacySubnetsForNodeGroupsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeNodeGroupInitialiser) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.doAllNodegroupStackTasksMutex.RLock() + defer fake.doAllNodegroupStackTasksMutex.RUnlock() + fake.doesAWSNodeUseIRSAMutex.RLock() + defer fake.doesAWSNodeUseIRSAMutex.RUnlock() + fake.expandInstanceSelectorOptionsMutex.RLock() + defer fake.expandInstanceSelectorOptionsMutex.RUnlock() + fake.newAWSSelectorSessionMutex.RLock() + defer fake.newAWSSelectorSessionMutex.RUnlock() + fake.normalizeMutex.RLock() + defer fake.normalizeMutex.RUnlock() + fake.validateExistingNodeGroupsForCompatibilityMutex.RLock() + defer fake.validateExistingNodeGroupsForCompatibilityMutex.RUnlock() + fake.validateLegacySubnetsForNodeGroupsMutex.RLock() + defer fake.validateLegacySubnetsForNodeGroupsMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeNodeGroupInitialiser) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ eks.NodeGroupInitialiser = new(FakeNodeGroupInitialiser) diff --git a/pkg/eks/nodegroup.go b/pkg/eks/nodegroup.go index 9beece7a36..accfc2ce82 100644 --- a/pkg/eks/nodegroup.go +++ b/pkg/eks/nodegroup.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "strings" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" @@ -25,8 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" ) @@ -207,58 +204,6 @@ type KubeNodeGroup interface { GetAMIFamily() string } -// WaitForNodes waits till the nodes are ready -func (c *ClusterProvider) WaitForNodes(clientSet kubernetes.Interface, ng KubeNodeGroup) error { - minSize := ng.Size() - if minSize == 0 { - return nil - } - timer := time.After(c.Provider.WaitTimeout()) - timeout := false - readyNodes := sets.NewString() - watcher, err := clientSet.CoreV1().Nodes().Watch(context.TODO(), ng.ListOptions()) - if err != nil { - return errors.Wrap(err, "creating node watcher") - } - - counter, err := getNodes(clientSet, ng) - if err != nil { - return errors.Wrap(err, "listing nodes") - } - - logger.Info("waiting for at least %d node(s) to become ready in %q", minSize, ng.NameString()) - for !timeout && counter < minSize { - select { - case event := <-watcher.ResultChan(): - logger.Debug("event = %#v", event) - if event.Object != nil && event.Type != watch.Deleted { - if node, ok := event.Object.(*corev1.Node); ok { - if isNodeReady(node) { - readyNodes.Insert(node.Name) - counter = readyNodes.Len() - logger.Debug("node %q is ready in %q", node.Name, ng.NameString()) - } else { - logger.Debug("node %q seen in %q, but not ready yet", node.Name, ng.NameString()) - logger.Debug("node = %#v", *node) - } - } - } - case <-timer: - timeout = true - } - } - 
watcher.Stop() - if timeout { - return fmt.Errorf("timed out (after %s) waiting for at least %d nodes to join the cluster and become ready in %q", c.Provider.WaitTimeout(), minSize, ng.NameString()) - } - - if _, err = getNodes(clientSet, ng); err != nil { - return errors.Wrap(err, "re-listing nodes") - } - - return nil -} - // GetNodeGroupIAM retrieves the IAM configuration of the given nodegroup func (c *ClusterProvider) GetNodeGroupIAM(stackManager manager.StackManager, ng *api.NodeGroup) error { stacks, err := stackManager.DescribeNodeGroupStacks() @@ -299,7 +244,8 @@ func getAWSNodeSAARNAnnotation(clientSet kubernetes.Interface) (string, error) { return clusterDaemonSet.Annotations[api.AnnotationEKSRoleARN], nil } -func DoesAWSNodeUseIRSA(provider api.ClusterProvider, clientSet kubernetes.Interface) (bool, error) { +// DoesAWSNodeUseIRSA evaluates whether an aws-node uses IRSA +func (n *NodeGroupService) DoesAWSNodeUseIRSA(provider api.ClusterProvider, clientSet kubernetes.Interface) (bool, error) { roleArn, err := getAWSNodeSAARNAnnotation(clientSet) if err != nil { return false, errors.Wrap(err, "error retrieving aws-node arn") diff --git a/pkg/eks/nodegroup_service.go b/pkg/eks/nodegroup_service.go index 2107d113a6..ac068f8c2a 100644 --- a/pkg/eks/nodegroup_service.go +++ b/pkg/eks/nodegroup_service.go @@ -1,7 +1,9 @@ package eks import ( + "fmt" "reflect" + "strings" "github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity" "github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector" @@ -10,34 +12,55 @@ import ( "github.com/pkg/errors" "github.com/weaveworks/eksctl/pkg/ami" api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/cfn/manager" + "github.com/weaveworks/eksctl/pkg/kubernetes" "github.com/weaveworks/eksctl/pkg/ssh" + "github.com/weaveworks/eksctl/pkg/utils/tasks" + "github.com/weaveworks/eksctl/pkg/vpc" ) //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate -// InstanceSelector selects a set of instance types matching the specified instance selector criteria //counterfeiter:generate -o fakes/fake_instance_selector.go . InstanceSelector +// InstanceSelector selects a set of instance types matching the specified instance selector criteria type InstanceSelector interface { // Filter returns a set of instance types matching the specified instance selector filters Filter(selector.Filters) ([]string, error) } +//counterfeiter:generate -o fakes/fake_nodegroup_initialiser.go . NodeGroupInitialiser +// NodeGroupInitialiser is an interface that provides helpers for nodegroup creation. 
+type NodeGroupInitialiser interface { + Normalize(nodePools []api.NodePool, clusterMeta *api.ClusterMeta) error + ExpandInstanceSelectorOptions(nodePools []api.NodePool, clusterAZs []string) error + NewAWSSelectorSession(provider api.ClusterProvider) *selector.Selector + ValidateLegacySubnetsForNodeGroups(spec *api.ClusterConfig, provider api.ClusterProvider) error + DoesAWSNodeUseIRSA(provider api.ClusterProvider, clientSet kubernetes.Interface) (bool, error) + DoAllNodegroupStackTasks(taskTree *tasks.TaskTree, region, name string) error + ValidateExistingNodeGroupsForCompatibility(cfg *api.ClusterConfig, stackManager manager.StackManager) error +} + // A NodeGroupService provides helpers for nodegroup creation type NodeGroupService struct { - provider api.ClusterProvider + Provider api.ClusterProvider instanceSelector InstanceSelector } // NewNodeGroupService creates a new NodeGroupService func NewNodeGroupService(provider api.ClusterProvider, instanceSelector InstanceSelector) *NodeGroupService { return &NodeGroupService{ - provider: provider, + Provider: provider, instanceSelector: instanceSelector, } } const defaultCPUArch = "x86_64" +// NewAWSSelectorSession returns a new instance of Selector provided an aws session +func (m *NodeGroupService) NewAWSSelectorSession(provider api.ClusterProvider) *selector.Selector { + return selector.New(provider.Session()) +} + // Normalize normalizes nodegroups func (m *NodeGroupService) Normalize(nodePools []api.NodePool, clusterMeta *api.ClusterMeta) error { for _, np := range nodePools { @@ -45,7 +68,7 @@ func (m *NodeGroupService) Normalize(nodePools []api.NodePool, clusterMeta *api. case *api.NodeGroup: // resolve AMI if !api.IsAMI(ng.AMI) { - if err := ResolveAMI(m.provider, clusterMeta.Version, ng); err != nil { + if err := ResolveAMI(m.Provider, clusterMeta.Version, ng); err != nil { return err } } else { @@ -60,7 +83,7 @@ func (m *NodeGroupService) Normalize(nodePools []api.NodePool, clusterMeta *api. ng := np.BaseNodeGroup() if ng.AMI != "" { - if err := ami.Use(m.provider.EC2(), ng); err != nil { + if err := ami.Use(m.Provider.EC2(), ng); err != nil { return err } } @@ -68,7 +91,7 @@ func (m *NodeGroupService) Normalize(nodePools []api.NodePool, clusterMeta *api. // fingerprint, so if unique keys are provided, each will get // loaded and used as intended and there is no need to have // nodegroup name in the key name - publicKeyName, err := ssh.LoadKey(ng.SSH, clusterMeta.Name, ng.Name, m.provider.EC2()) + publicKeyName, err := ssh.LoadKey(ng.SSH, clusterMeta.Name, ng.Name, m.Provider.EC2()) if err != nil { return err } @@ -177,3 +200,69 @@ func (m *NodeGroupService) expandInstanceSelector(ins *api.InstanceSelector, azs return instanceTypes, nil } + +func (m *NodeGroupService) ValidateLegacySubnetsForNodeGroups(spec *api.ClusterConfig, provider api.ClusterProvider) error { + return vpc.ValidateLegacySubnetsForNodeGroups(spec, provider) +} + +// DoAllNodegroupStackTasks iterates over nodegroup tasks and returns any errors. 
+func (m *NodeGroupService) DoAllNodegroupStackTasks(taskTree *tasks.TaskTree, region, name string) error {
+	logger.Info(taskTree.Describe())
+	errs := taskTree.DoAllSync()
+	if len(errs) > 0 {
+		logger.Info("%d error(s) occurred and nodegroups haven't been created properly, you may wish to check the CloudFormation console", len(errs))
+		logger.Info("to clean up resources, run 'eksctl delete nodegroup --region=%s --cluster=%s --name=<name>' for each of the failed nodegroups", region, name)
+		for _, err := range errs {
+			if err != nil {
+				logger.Critical("%s\n", err.Error())
+			}
+		}
+		return fmt.Errorf("failed to create nodegroups for cluster %q", name)
+	}
+	return nil
+}
+
+// ValidateExistingNodeGroupsForCompatibility looks at each of the existing nodegroups and
+// validates their configuration; if it finds issues, it logs messages
+func (m *NodeGroupService) ValidateExistingNodeGroupsForCompatibility(cfg *api.ClusterConfig, stackManager manager.StackManager) error {
+	infoByNodeGroup, err := stackManager.DescribeNodeGroupStacksAndResources()
+	if err != nil {
+		return errors.Wrap(err, "getting resources for all nodegroup stacks")
+	}
+	if len(infoByNodeGroup) == 0 {
+		return nil
+	}
+
+	logger.Info("checking security group configuration for all nodegroups")
+	incompatibleNodeGroups := []string{}
+	for ng, info := range infoByNodeGroup {
+		if stackManager.StackStatusIsNotTransitional(info.Stack) {
+			isCompatible, err := isNodeGroupCompatible(ng, info)
+			if err != nil {
+				return err
+			}
+			if isCompatible {
+				logger.Debug("nodegroup %q is compatible", ng)
+			} else {
+				logger.Debug("nodegroup %q is incompatible", ng)
+				incompatibleNodeGroups = append(incompatibleNodeGroups, ng)
+			}
+		}
+	}
+
+	numIncompatibleNodeGroups := len(incompatibleNodeGroups)
+	if numIncompatibleNodeGroups == 0 {
+		logger.Info("all nodegroups have up-to-date configuration")
+		return nil
+	}
+
+	logger.Critical("found %d nodegroup(s) (%s) without shared security group, cluster networking may be broken",
+		numIncompatibleNodeGroups, strings.Join(incompatibleNodeGroups, ", "))
+	logger.Critical("it's recommended to create new nodegroups, then delete old ones")
+	if cfg.VPC.SharedNodeSecurityGroup != "" {
+		logger.Critical("as a temporary fix, you can patch the configuration and add each of these nodegroup(s) to %q",
+			cfg.VPC.SharedNodeSecurityGroup)
+	}
+
+	return nil
+}
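
For context only, and not part of the diff: a minimal, hypothetical sketch of how the generated FakeNodeGroupInitialiser could be wired into a unit test. The checkNodeGroups helper is invented purely for illustration, the canonical github.com/weaveworks/eksctl import paths are assumed, and the plain testing package is used for brevity even though the project's own suites may rely on Ginkgo/Gomega; only the stub, call-count, and args-for-call methods shown in the generated code above are exercised.

package fakes_test

import (
	"errors"
	"testing"

	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
	"github.com/weaveworks/eksctl/pkg/eks"
	"github.com/weaveworks/eksctl/pkg/eks/fakes"
)

// checkNodeGroups is a hypothetical helper standing in for production code that
// depends on the eks.NodeGroupInitialiser interface introduced in this change.
func checkNodeGroups(ngInit eks.NodeGroupInitialiser, cfg *api.ClusterConfig) error {
	// The stack manager is not inspected by the fake, so nil is acceptable here.
	return ngInit.ValidateExistingNodeGroupsForCompatibility(cfg, nil)
}

func TestFakeNodeGroupInitialiser(t *testing.T) {
	fake := &fakes.FakeNodeGroupInitialiser{}

	// Programme a canned error so the caller's error path can be exercised.
	fake.ValidateExistingNodeGroupsForCompatibilityReturns(errors.New("incompatible nodegroup"))

	cfg := &api.ClusterConfig{}
	if err := checkNodeGroups(fake, cfg); err == nil {
		t.Fatal("expected the stubbed error to be returned")
	}

	// The fake records every invocation, so interactions can be asserted.
	if got := fake.ValidateExistingNodeGroupsForCompatibilityCallCount(); got != 1 {
		t.Fatalf("expected 1 call, got %d", got)
	}
	if gotCfg, _ := fake.ValidateExistingNodeGroupsForCompatibilityArgsForCall(0); gotCfg != cfg {
		t.Fatal("expected the same ClusterConfig to be passed through")
	}
}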