diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go
index d48d27aeb4b26..5a1c5313f03c2 100644
--- a/cmd/kops/integration_test.go
+++ b/cmd/kops/integration_test.go
@@ -328,6 +328,17 @@ func TestDockerCustom(t *testing.T) {
 	newIntegrationTest("docker.example.com", "docker-custom").runTestCloudformation(t)
 }
 
+// TestAPIServerNodes runs a simple configuration with dedicated apiserver nodes
+func TestAPIServerNodes(t *testing.T) {
+	featureflag.ParseFlags("+APIServerNodes")
+	unsetFeatureFlags := func() {
+		featureflag.ParseFlags("-APIServerNodes")
+	}
+	defer unsetFeatureFlags()
+
+	newIntegrationTest("minimal.example.com", "apiservernodes").runTestCloudformation(t)
+}
+
 func (i *integrationTest) runTest(t *testing.T, h *testutils.IntegrationTestHarness, expectedDataFilenames []string, tfFileName string, expectedTfFileName string, phase *cloudup.Phase) {
 	ctx := context.Background()
 
diff --git a/cmd/kops/rollingupdatecluster.go b/cmd/kops/rollingupdatecluster.go
index 84aa3ce237299..90ec626b3165b 100644
--- a/cmd/kops/rollingupdatecluster.go
+++ b/cmd/kops/rollingupdatecluster.go
@@ -177,6 +177,11 @@ func NewCmdRollingUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 		Example: rollingupdateExample,
 	}
 
+	allRoles := make([]string, 0, len(kopsapi.AllInstanceGroupRoles))
+	for _, r := range kopsapi.AllInstanceGroupRoles {
+		allRoles = append(allRoles, string(r))
+	}
+
 	cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Perform rolling update immediately, without --yes rolling-update executes a dry-run")
 	cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force rolling update, even if no changes")
 	cmd.Flags().BoolVar(&options.CloudOnly, "cloudonly", options.CloudOnly, "Perform rolling update without confirming progress with k8s")
@@ -189,7 +194,7 @@ func NewCmdRollingUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 	cmd.Flags().DurationVar(&options.PostDrainDelay, "post-drain-delay", options.PostDrainDelay, "Time to wait after draining each node")
 	cmd.Flags().BoolVarP(&options.Interactive, "interactive", "i", options.Interactive, "Prompt to continue after each instance is updated")
 	cmd.Flags().StringSliceVar(&options.InstanceGroups, "instance-group", options.InstanceGroups, "List of instance groups to update (defaults to all if not specified)")
-	cmd.Flags().StringSliceVar(&options.InstanceGroupRoles, "instance-group-roles", options.InstanceGroupRoles, "If specified, only instance groups of the specified role will be updated (e.g. Master,Node,Bastion)")
Master,Node,Bastion)") + cmd.Flags().StringSliceVar(&options.InstanceGroupRoles, "instance-group-roles", options.InstanceGroupRoles, "If specified, only instance groups of the specified role will be updated ("+strings.Join(allRoles, ",")+")") cmd.Flags().BoolVar(&options.FailOnDrainError, "fail-on-drain-error", true, "The rolling-update will fail if draining a node fails.") cmd.Flags().BoolVar(&options.FailOnValidate, "fail-on-validate-error", true, "The rolling-update will fail if the cluster fails to validate.") @@ -302,11 +307,14 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer if len(options.InstanceGroupRoles) != 0 { var filtered []*kopsapi.InstanceGroup - for _, ig := range instanceGroups { - for _, role := range options.InstanceGroupRoles { - if ig.Spec.Role == kopsapi.InstanceGroupRole(strings.Title(strings.ToLower(role))) { + for _, role := range options.InstanceGroupRoles { + s, f := kopsapi.ParseInstanceGroupRole(role, true) + if !f { + return fmt.Errorf("invalid instance group role %q", role) + } + for _, ig := range instanceGroups { + if ig.Spec.Role == s { filtered = append(filtered, ig) - continue } } } diff --git a/docs/cli/kops_create_instancegroup.md b/docs/cli/kops_create_instancegroup.md index 9299dfb0fb57e..af81e3a8676b4 100644 --- a/docs/cli/kops_create_instancegroup.md +++ b/docs/cli/kops_create_instancegroup.md @@ -36,7 +36,7 @@ kops create instancegroup [flags] --edit If true, an editor will be opened to edit default values. (default true) -h, --help help for instancegroup -o, --output string Output format. One of json|yaml - --role string Type of instance group to create (Node,Master,Bastion) (default "Node") + --role string Type of instance group to create (Master,APIServer,Node,Bastion) (default "Node") --subnet strings Subnet in which to create instance group. One of Availability Zone like eu-west-1a or a comma-separated list of multiple Availability Zones. ``` diff --git a/docs/cli/kops_rolling-update_cluster.md b/docs/cli/kops_rolling-update_cluster.md index bda4e1241f92e..1f50810a5d934 100644 --- a/docs/cli/kops_rolling-update_cluster.md +++ b/docs/cli/kops_rolling-update_cluster.md @@ -75,7 +75,7 @@ kops rolling-update cluster [flags] --force Force rolling update, even if no changes -h, --help help for cluster --instance-group strings List of instance groups to update (defaults to all if not specified) - --instance-group-roles strings If specified, only instance groups of the specified role will be updated (e.g. 
+      --instance-group-roles strings   If specified, only instance groups of the specified role will be updated (Master,APIServer,Node,Bastion)
  -i, --interactive                     Prompt to continue after each instance is updated
      --master-interval duration        Time to wait between restarting masters (default 15s)
      --node-interval duration          Time to wait between restarting nodes (default 15s)
diff --git a/nodeup/pkg/model/context.go b/nodeup/pkg/model/context.go
index 3bfa929c4526d..9c2417712ab8f 100644
--- a/nodeup/pkg/model/context.go
+++ b/nodeup/pkg/model/context.go
@@ -57,6 +57,9 @@ type NodeupModelContext struct {
 	// IsMaster is true if the InstanceGroup has a role of master (populated by Init)
 	IsMaster bool
 
+	// HasAPIServer is true if the InstanceGroup has a role of master or apiserver (populated by Init)
+	HasAPIServer bool
+
 	kubernetesVersion semver.Version
 	bootstrapCerts    map[string]*nodetasks.BootstrapCert
 }
@@ -70,10 +73,15 @@ func (c *NodeupModelContext) Init() error {
 	c.kubernetesVersion = *k8sVersion
 	c.bootstrapCerts = map[string]*nodetasks.BootstrapCert{}
 
-	if c.NodeupConfig.InstanceGroupRole == kops.InstanceGroupRoleMaster {
+	role := c.NodeupConfig.InstanceGroupRole
+
+	if role == kops.InstanceGroupRoleMaster {
 		c.IsMaster = true
 	}
 
+	if role == kops.InstanceGroupRoleMaster || role == kops.InstanceGroupRoleAPIServer {
+		c.HasAPIServer = true
+	}
 	return nil
 }
diff --git a/nodeup/pkg/model/etcd_manager_tls.go b/nodeup/pkg/model/etcd_manager_tls.go
index 1388281a4001e..370d54e6ed5cb 100644
--- a/nodeup/pkg/model/etcd_manager_tls.go
+++ b/nodeup/pkg/model/etcd_manager_tls.go
@@ -32,17 +32,26 @@ var _ fi.ModelBuilder = &EtcdManagerTLSBuilder{}
 
 // Build is responsible for TLS configuration for etcd-manager
 func (b *EtcdManagerTLSBuilder) Build(ctx *fi.ModelBuilderContext) error {
-	if !b.IsMaster || !b.UseEtcdManager() {
+	if !b.HasAPIServer || !b.UseEtcdManager() {
 		return nil
 	}
 
+	// We also dynamically generate the client keypair for apiserver
+	if err := b.buildKubeAPIServerKeypair(ctx); err != nil {
+		return err
+	}
+
 	for _, k := range []string{"main", "events"} {
 		d := "/etc/kubernetes/pki/etcd-manager-" + k
 
 		keys := make(map[string]string)
-		keys["etcd-manager-ca"] = "etcd-manager-ca-" + k
-		keys["etcd-peers-ca"] = "etcd-peers-ca-" + k
-		// Because API server can only have a single client-cert, we need to share a client CA
+
+		// Only nodes running etcd need the peers CA
+		if b.IsMaster {
+			keys["etcd-manager-ca"] = "etcd-manager-ca-" + k
+			keys["etcd-peers-ca"] = "etcd-peers-ca-" + k
+		}
+		// Because API server can only have a single client certificate for etcd, we need to share a client CA
 		keys["etcd-clients-ca"] = "etcd-clients-ca"
 
 		for fileName, keystoreName := range keys {
@@ -63,10 +72,6 @@ func (b *EtcdManagerTLSBuilder) Build(ctx *fi.ModelBuilderContext) error {
 		}
 	}
 
-	// We also dynamically generate the client keypair for apiserver
-	if err := b.buildKubeAPIServerKeypair(ctx); err != nil {
-		return err
-	}
 	return nil
 }
diff --git a/nodeup/pkg/model/kube_apiserver.go b/nodeup/pkg/model/kube_apiserver.go
index 45a90fe33a462..5e847edc5654a 100644
--- a/nodeup/pkg/model/kube_apiserver.go
+++ b/nodeup/pkg/model/kube_apiserver.go
@@ -52,7 +52,7 @@ var _ fi.ModelBuilder = &KubeAPIServerBuilder{}
 
 // Build is responsible for generating the configuration for the kube-apiserver
 func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
-	if !b.IsMaster {
+	if !b.HasAPIServer {
 		return nil
 	}
 
@@ -316,19 +316,29 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
 		}
 	}
 
+	var mainEtcdCluster, eventsEtcdCluster string
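+	// API server-only nodes reach etcd through the DNS names published by
+	// etcd-manager (via the dns.alpha.kubernetes.io/internal annotation),
+	// while masters keep using the local etcd endpoints.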
+	if b.IsMaster {
+		mainEtcdCluster = "https://127.0.0.1:4001"
+		eventsEtcdCluster = "https://127.0.0.1:4002"
+	} else {
+		host := b.Cluster.ObjectMeta.Name
+		mainEtcdCluster = "https://main.etcd." + host + ":4001"
+		eventsEtcdCluster = "https://events.etcd." + host + ":4002"
+	}
+
 	if b.UseEtcdManager() && b.UseEtcdTLS() {
 		basedir := "/etc/kubernetes/pki/kube-apiserver"
 		kubeAPIServer.EtcdCAFile = filepath.Join(basedir, "etcd-ca.crt")
 		kubeAPIServer.EtcdCertFile = filepath.Join(basedir, "etcd-client.crt")
 		kubeAPIServer.EtcdKeyFile = filepath.Join(basedir, "etcd-client.key")
-		kubeAPIServer.EtcdServers = []string{"https://127.0.0.1:4001"}
-		kubeAPIServer.EtcdServersOverrides = []string{"/events#https://127.0.0.1:4002"}
+		kubeAPIServer.EtcdServers = []string{mainEtcdCluster}
+		kubeAPIServer.EtcdServersOverrides = []string{"/events#" + eventsEtcdCluster}
 	} else if b.UseEtcdTLS() {
 		kubeAPIServer.EtcdCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
 		kubeAPIServer.EtcdCertFile = filepath.Join(b.PathSrvKubernetes(), "etcd-client.pem")
 		kubeAPIServer.EtcdKeyFile = filepath.Join(b.PathSrvKubernetes(), "etcd-client-key.pem")
-		kubeAPIServer.EtcdServers = []string{"https://127.0.0.1:4001"}
-		kubeAPIServer.EtcdServersOverrides = []string{"/events#https://127.0.0.1:4002"}
+		kubeAPIServer.EtcdServers = []string{mainEtcdCluster}
+		kubeAPIServer.EtcdServersOverrides = []string{"/events#" + eventsEtcdCluster}
 	}
 
 	// @check if we are using secure kubelet client certificates
diff --git a/nodeup/pkg/model/kubelet.go b/nodeup/pkg/model/kubelet.go
index e9f52d7a504ba..5e356c2ff768f 100644
--- a/nodeup/pkg/model/kubelet.go
+++ b/nodeup/pkg/model/kubelet.go
@@ -421,6 +421,7 @@ func (b *KubeletBuilder) addContainerizedMounter(c *fi.ModelBuilderContext) erro
 // buildKubeletConfigSpec returns the kubeletconfig for the specified instanceGroup
 func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, error) {
 	isMaster := b.IsMaster
+	isAPIServer := b.InstanceGroup.Spec.Role == kops.InstanceGroupRoleAPIServer
 
 	// Merge KubeletConfig for NodeLabels
 	c := b.NodeupConfig.KubeletConfig
@@ -490,6 +491,10 @@
 		// (Even though the value is empty, we still expect =:)
 		c.Taints = append(c.Taints, nodelabels.RoleLabelMaster16+"=:"+string(v1.TaintEffectNoSchedule))
 	}
+	if len(c.Taints) == 0 && isAPIServer {
+		// (Even though the value is empty, we still expect =:)
+		c.Taints = append(c.Taints, nodelabels.RoleLabelAPIServer16+"=:"+string(v1.TaintEffectNoSchedule))
+	}
 
 	// Enable scheduling since it can be controlled via taints.
 	c.RegisterSchedulable = fi.Bool(true)
diff --git a/nodeup/pkg/model/secrets.go b/nodeup/pkg/model/secrets.go
index 3c46014efad74..4b9fcc21ecb9f 100644
--- a/nodeup/pkg/model/secrets.go
+++ b/nodeup/pkg/model/secrets.go
@@ -69,8 +69,8 @@ func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
 		}
 	}
 
-	// if we are not a master we can stop here
-	if !b.IsMaster {
+	// If we do not run the Kubernetes API server we can stop here.
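+	// API server-only nodes run kube-apiserver too, so they need the secrets
+	// built below just like masters; hence HasAPIServer rather than IsMaster.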
+	if !b.HasAPIServer {
 		return nil
 	}
 
diff --git a/pkg/apis/kops/instancegroup.go b/pkg/apis/kops/instancegroup.go
index 31aff438e1083..9d07c3c16c7e3 100644
--- a/pkg/apis/kops/instancegroup.go
+++ b/pkg/apis/kops/instancegroup.go
@@ -18,7 +18,6 @@ package kops
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/klog/v2"
 )
 
 const (
@@ -59,12 +58,15 @@ const (
 	InstanceGroupRoleNode InstanceGroupRole = "Node"
 	// InstanceGroupRoleBastion is a bastion role
 	InstanceGroupRoleBastion InstanceGroupRole = "Bastion"
+	// InstanceGroupRoleAPIServer is an API server role
+	InstanceGroupRoleAPIServer InstanceGroupRole = "APIServer"
 )
 
 // AllInstanceGroupRoles is a slice of all valid InstanceGroupRole values
 var AllInstanceGroupRoles = []InstanceGroupRole{
-	InstanceGroupRoleNode,
 	InstanceGroupRoleMaster,
+	InstanceGroupRoleAPIServer,
+	InstanceGroupRoleNode,
 	InstanceGroupRoleBastion,
 }
 
@@ -286,27 +288,32 @@ func (g *InstanceGroup) IsMaster() bool {
 	switch g.Spec.Role {
 	case InstanceGroupRoleMaster:
 		return true
-	case InstanceGroupRoleNode:
-		return false
-	case InstanceGroupRoleBastion:
+	default:
 		return false
+	}
+}
+
+// IsAPIServerOnly checks if instanceGroup runs only the API Server
+func (g *InstanceGroup) IsAPIServerOnly() bool {
+	switch g.Spec.Role {
+	case InstanceGroupRoleAPIServer:
+		return true
 	default:
-		klog.Fatalf("Role not set in group %v", g)
 		return false
 	}
 }
 
+// HasAPIServer checks if instanceGroup runs an API Server
+func (g *InstanceGroup) HasAPIServer() bool {
+	return g.IsMaster() || g.IsAPIServerOnly()
+}
+
 // IsBastion checks if instanceGroup is a bastion
 func (g *InstanceGroup) IsBastion() bool {
 	switch g.Spec.Role {
-	case InstanceGroupRoleMaster:
-		return false
-	case InstanceGroupRoleNode:
-		return false
 	case InstanceGroupRoleBastion:
 		return true
 	default:
-		klog.Fatalf("Role not set in group %v", g)
 		return false
 	}
 }
diff --git a/pkg/apis/kops/parse.go b/pkg/apis/kops/parse.go
index 36e78f2531a24..aea72b52e54b2 100644
--- a/pkg/apis/kops/parse.go
+++ b/pkg/apis/kops/parse.go
@@ -23,7 +23,10 @@ import (
 	"k8s.io/kops/upup/pkg/fi/utils"
 )
 
-// ParseInstanceGroupRole converts a string to an InstanceGroupRole
+// ParseInstanceGroupRole converts a string to an InstanceGroupRole.
+//
+// If lenient is set to true, the function will match pluralised words too.
+// It will return the instance group role and true if a match was found.
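+// For example, with lenient set, "node", "Node", and "nodes" all resolve to InstanceGroupRoleNode.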
 func ParseInstanceGroupRole(input string, lenient bool) (InstanceGroupRole, bool) {
 	findRole := strings.ToLower(input)
 	if lenient {
diff --git a/pkg/apis/kops/util/labels.go b/pkg/apis/kops/util/labels.go
index 29d4f37e493a2..13079b323fc3a 100644
--- a/pkg/apis/kops/util/labels.go
+++ b/pkg/apis/kops/util/labels.go
@@ -25,9 +25,15 @@ func GetNodeRole(node *v1.Node) string {
 	if _, ok := node.Labels["node-role.kubernetes.io/master"]; ok {
 		return "master"
 	}
+	if _, ok := node.Labels["node-role.kubernetes.io/control-plane"]; ok {
+		return "control-plane"
+	}
 	if _, ok := node.Labels["node-role.kubernetes.io/node"]; ok {
 		return "node"
 	}
+	if _, ok := node.Labels["node-role.kubernetes.io/api-server"]; ok {
+		return "apiserver"
+	}
 	// Older label
 	return node.Labels["kubernetes.io/role"]
 }
diff --git a/pkg/apis/kops/validation/instancegroup.go b/pkg/apis/kops/validation/instancegroup.go
index 169ffed7e21e0..4cdfa45336ec2 100644
--- a/pkg/apis/kops/validation/instancegroup.go
+++ b/pkg/apis/kops/validation/instancegroup.go
@@ -47,6 +47,7 @@ func ValidateInstanceGroup(g *kops.InstanceGroup, cloud fi.Cloud) field.ErrorLis
 		}
 	case kops.InstanceGroupRoleNode:
 	case kops.InstanceGroupRoleBastion:
+	case kops.InstanceGroupRoleAPIServer:
 	default:
 		var supported []string
 		for _, role := range kops.AllInstanceGroupRoles {
@@ -186,6 +187,10 @@ func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, cl
 		allErrs = append(allErrs, ValidateMasterInstanceGroup(g, cluster)...)
 	}
 
+	if g.Spec.Role == kops.InstanceGroupRoleAPIServer && kops.CloudProviderID(cluster.Spec.CloudProvider) != kops.CloudProviderAWS {
+		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "role"), "APIServer role is only supported on AWS"))
+	}
+
 	// Check that instance groups are defined in subnets that are defined in the cluster
 	{
 		clusterSubnets := make(map[string]*kops.ClusterSubnetSpec)
diff --git a/pkg/apis/kops/validation/instancegroup_test.go b/pkg/apis/kops/validation/instancegroup_test.go
index 80263d4eb2d8c..50b087e4b822a 100644
--- a/pkg/apis/kops/validation/instancegroup_test.go
+++ b/pkg/apis/kops/validation/instancegroup_test.go
@@ -384,3 +384,68 @@ func TestIGUpdatePolicy(t *testing.T) {
 		})
 	}
 }
+
+func TestValidInstanceGroup(t *testing.T) {
+	grid := []struct {
+		IG             *kops.InstanceGroup
+		ExpectedErrors int
+		Description    string
+	}{
+		{
+			IG: &kops.InstanceGroup{
+				ObjectMeta: v1.ObjectMeta{
+					Name: "eu-central-1a",
+				},
+				Spec: kops.InstanceGroupSpec{
+					Role:    kops.InstanceGroupRoleMaster,
+					Subnets: []string{"eu-central-1a"},
+				},
+			},
+			ExpectedErrors: 0,
+			Description:    "Valid master instance group failed to validate",
+		},
+		{
+			IG: &kops.InstanceGroup{
+				ObjectMeta: v1.ObjectMeta{
+					Name: "eu-central-1a",
+				},
+				Spec: kops.InstanceGroupSpec{
+					Role:    kops.InstanceGroupRoleAPIServer,
+					Subnets: []string{"eu-central-1a"},
+				},
+			},
+			ExpectedErrors: 0,
+			Description:    "Valid API Server instance group failed to validate",
+		},
+		{
+			IG: &kops.InstanceGroup{
+				ObjectMeta: v1.ObjectMeta{
+					Name: "eu-central-1a",
+				},
+				Spec: kops.InstanceGroupSpec{
+					Role:    kops.InstanceGroupRoleNode,
+					Subnets: []string{"eu-central-1a"},
+				},
+			},
+			ExpectedErrors: 0,
+			Description:    "Valid node instance group failed to validate",
+		},
+		{
+			IG: &kops.InstanceGroup{
+				ObjectMeta: v1.ObjectMeta{
+					Name: "eu-central-1a",
+				},
+				Spec: kops.InstanceGroupSpec{
+					Role:    kops.InstanceGroupRoleBastion,
+					Subnets: []string{"eu-central-1a"},
+				},
+			},
+			ExpectedErrors: 0,
+			Description:    "Valid bastion instance group failed to validate",
+		},
+	}
+	for _, g := range grid {
+		errList := ValidateInstanceGroup(g.IG, nil)
+		testErrors(t, g.Description, errList, []string{})
+	}
+}
diff --git a/pkg/featureflag/featureflag.go b/pkg/featureflag/featureflag.go
index 7a66b72a63fa0..34d10681197a4 100644
--- a/pkg/featureflag/featureflag.go
+++ b/pkg/featureflag/featureflag.go
@@ -99,6 +99,8 @@ var (
 	Azure = New("Azure", Bool(false))
 	// KopsControllerStateStore enables fetching the kops state from kops-controller, instead of requiring access to S3/GCS/etc.
 	KopsControllerStateStore = New("KopsControllerStateStore", Bool(false))
+	// APIServerNodes enables the ability to provision nodes that only run the kube-apiserver
+	APIServerNodes = New("APIServerNodes", Bool(false))
 )
 
 // FeatureFlag defines a feature flag
diff --git a/pkg/instancegroups/rollingupdate.go b/pkg/instancegroups/rollingupdate.go
index 01a4f0b45d18d..4e08e7ea5ceed 100644
--- a/pkg/instancegroups/rollingupdate.go
+++ b/pkg/instancegroups/rollingupdate.go
@@ -98,12 +98,15 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.C
 	results := make(map[string]error)
 
 	masterGroups := make(map[string]*cloudinstances.CloudInstanceGroup)
+	apiServerGroups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	nodeGroups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	bastionGroups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	for k, group := range groups {
 		switch group.InstanceGroup.Spec.Role {
 		case api.InstanceGroupRoleNode:
 			nodeGroups[k] = group
+		case api.InstanceGroupRoleAPIServer:
+			apiServerGroups[k] = group
 		case api.InstanceGroupRoleMaster:
 			masterGroups[k] = group
 		case api.InstanceGroupRoleBastion:
@@ -160,6 +163,21 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.C
 		}
 	}
 
+	// Upgrade API servers
+	{
+		// Seed results with an error so that any group we never reach is still reported as failed.
+		for k := range apiServerGroups {
+			results[k] = fmt.Errorf("function panic apiservers")
+		}
+
+		for _, k := range sortGroups(apiServerGroups) {
+			err := c.rollingUpdateInstanceGroup(apiServerGroups[k], c.NodeInterval)
+
+			results[k] = err
+
+			// TODO: Bail on error?
+		}
+	}
+
 	// Upgrade nodes
 	{
 		// We run nodes in series, even if they are in separate instance groups
diff --git a/pkg/model/awsmodel/autoscalinggroup.go b/pkg/model/awsmodel/autoscalinggroup.go
index 83f0a82ca16b2..83df7b90241c6 100644
--- a/pkg/model/awsmodel/autoscalinggroup.go
+++ b/pkg/model/awsmodel/autoscalinggroup.go
@@ -230,7 +230,7 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateHelper(c *fi.ModelBuil
 		t.HTTPPutResponseHopLimit = ig.Spec.InstanceMetadata.HTTPPutResponseHopLimit
 	}
 
-	if ig.Spec.Role == kops.InstanceGroupRoleMaster &&
+	if ig.HasAPIServer() &&
 		b.APILoadBalancerClass() == kops.LoadBalancerClassNetwork {
 		for _, id := range b.Cluster.Spec.API.LoadBalancer.AdditionalSecurityGroups {
 			sgTask := &awstasks.SecurityGroup{
@@ -410,7 +410,7 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
 	// hybrid (+SpotinstHybrid) instance groups.
 	if !featureflag.Spotinst.Enabled() || (featureflag.SpotinstHybrid.Enabled() && !HybridInstanceGroup(ig)) {
-		if b.UseLoadBalancerForAPI() && ig.Spec.Role == kops.InstanceGroupRoleMaster {
+		if b.UseLoadBalancerForAPI() && ig.HasAPIServer() {
 			if b.UseNetworkLoadBalancer() {
 				t.TargetGroups = append(t.TargetGroups, b.LinkToTargetGroup("tcp"))
 				if b.Cluster.Spec.API.LoadBalancer.SSLCertificate != "" {
diff --git a/pkg/model/components/etcdmanager/model.go b/pkg/model/components/etcdmanager/model.go
index 71e235126e907..b36e27781d9b7 100644
--- a/pkg/model/components/etcdmanager/model.go
+++ b/pkg/model/components/etcdmanager/model.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/assets"
 	"k8s.io/kops/pkg/dns"
+	"k8s.io/kops/pkg/featureflag"
 	"k8s.io/kops/pkg/flagbuilder"
 	"k8s.io/kops/pkg/k8scodecs"
 	"k8s.io/kops/pkg/kubemanifest"
@@ -257,8 +258,13 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster kops.EtcdClusterSpec) (*v1.Pod
 	}
 
 	etcdInsecure := !b.UseEtcdTLS()
 
+	var clientHost string
-	clientHost := "__name__"
+	if featureflag.APIServerNodes.Enabled() {
+		clientHost = etcdCluster.Name + ".etcd." + b.ClusterName()
+	} else {
+		clientHost = "__name__"
+	}
 	clientPort := 4001
 
 	clusterName := "etcd-" + etcdCluster.Name
@@ -269,6 +275,15 @@
 	}
 
 	pod.Name = "etcd-manager-" + etcdCluster.Name
+
+	if pod.Annotations == nil {
+		pod.Annotations = make(map[string]string)
+	}
+
+	if featureflag.APIServerNodes.Enabled() {
+		pod.Annotations["dns.alpha.kubernetes.io/internal"] = clientHost
+	}
+
 	if pod.Labels == nil {
 		pod.Labels = make(map[string]string)
 	}
@@ -306,7 +321,6 @@
 		peerPort = 2382
 		grpcPort = wellknownports.EtcdCiliumGRPC
 		quarantinedClientPort = wellknownports.EtcdCiliumQuarantinedClientPort
-		clientHost = b.Cluster.Spec.MasterInternalName
 	default:
 		return nil, fmt.Errorf("unknown etcd cluster key %q", etcdCluster.Name)
 	}
diff --git a/pkg/model/components/kubeapiserver/model.go b/pkg/model/components/kubeapiserver/model.go
index 62f46cc808a29..c57ed807a6ad7 100644
--- a/pkg/model/components/kubeapiserver/model.go
+++ b/pkg/model/components/kubeapiserver/model.go
@@ -80,7 +80,7 @@ func (b *KubeApiserverBuilder) Build(c *fi.ModelBuilderContext) error {
 	b.AssetBuilder.StaticManifests = append(b.AssetBuilder.StaticManifests, &assets.StaticManifest{
 		Key:   key,
 		Path:  location,
-		Roles: []kops.InstanceGroupRole{kops.InstanceGroupRoleMaster},
+		Roles: []kops.InstanceGroupRole{kops.InstanceGroupRoleMaster, kops.InstanceGroupRoleAPIServer},
 	})
 	return nil
 }
diff --git a/pkg/model/context.go b/pkg/model/context.go
index db9372279d45f..88b9fc25aeb20 100644
--- a/pkg/model/context.go
+++ b/pkg/model/context.go
@@ -162,6 +162,10 @@ func (m *KopsModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) (ma
 		labels[awstasks.CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleMaster))] = "1"
 	}
 
+	if ig.Spec.Role == kops.InstanceGroupRoleAPIServer {
+		labels[awstasks.CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleAPIServer))] = "1"
+	}
+
 	if ig.Spec.Role == kops.InstanceGroupRoleNode {
 		labels[awstasks.CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleNode))] = "1"
 	}
diff --git a/pkg/model/defaults/volumes.go b/pkg/model/defaults/volumes.go
index 98393ac30c751..6606cabcee011 100644
--- a/pkg/model/defaults/volumes.go
+++ b/pkg/model/defaults/volumes.go
@@ -36,6 +36,8 @@ func DefaultInstanceGroupVolumeSize(role kops.InstanceGroupRole) (int32, error)
 	switch role {
 	case kops.InstanceGroupRoleMaster:
 		return DefaultVolumeSizeMaster, nil
+	case kops.InstanceGroupRoleAPIServer:
+		return DefaultVolumeSizeNode, nil
 	case kops.InstanceGroupRoleNode:
 		return DefaultVolumeSizeNode, nil
 	case kops.InstanceGroupRoleBastion:
diff --git a/pkg/model/iam.go b/pkg/model/iam.go
index 255d74d159dbe..64b88198eedee 100644
--- a/pkg/model/iam.go
+++ b/pkg/model/iam.go
@@ -199,6 +199,8 @@ func (b *IAMModelBuilder) roleKey(role iam.Subject) (string, bool) {
 	switch role.(type) {
 	case *iam.NodeRoleMaster:
 		return strings.ToLower(string(kops.InstanceGroupRoleMaster)), false
+	case *iam.NodeRoleAPIServer:
+		return strings.ToLower(string(kops.InstanceGroupRoleAPIServer)), false
 	case *iam.NodeRoleNode:
 		return strings.ToLower(string(kops.InstanceGroupRoleNode)), false
 	case *iam.NodeRoleBastion:
diff --git a/pkg/model/iam/iam_builder.go b/pkg/model/iam/iam_builder.go
index e865dd26a7388..559577ab8293d 100644
--- a/pkg/model/iam/iam_builder.go
+++ b/pkg/model/iam/iam_builder.go
@@ -235,6 +235,54 @@ func (b *PolicyBuilder) BuildAWSPolicy() (*Policy, error) {
 	return p, nil
 }
 
+// BuildAWSPolicy generates a custom policy for API server nodes.
+func (r *NodeRoleAPIServer) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) {
+	resource := createResource(b)
+
+	p := &Policy{
+		Version: PolicyDefaultVersion,
+	}
+
+	addMasterEC2Policies(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
+	addCertIAMPolicies(p, resource)
+
+	var err error
+	if p, err = b.AddS3Permissions(p); err != nil {
+		return nil, fmt.Errorf("failed to generate AWS IAM S3 access statements: %v", err)
+	}
+
+	if b.KMSKeys != nil && len(b.KMSKeys) != 0 {
+		addKMSIAMPolicies(p, stringorslice.Slice(b.KMSKeys), b.Cluster.Spec.IAM.Legacy)
+	}
+
+	if b.Cluster.Spec.IAM.Legacy {
+		addLegacyDNSControllerPermissions(b, p)
+	}
+	AddDNSControllerPermissions(b, p)
+
+	if b.Cluster.Spec.IAM.Legacy || b.Cluster.Spec.IAM.AllowContainerRegistry {
+		addECRPermissions(p)
+	}
+
+	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
+		addAmazonVPCCNIPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName(), b.IAMPrefix())
+	}
+
+	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.LyftVPC != nil {
+		addLyftVPCPermissions(p, resource, b.Cluster.Spec.IAM.Legacy, b.Cluster.GetName())
+	}
+
+	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Cilium != nil && b.Cluster.Spec.Networking.Cilium.Ipam == kops.CiliumIpamEni {
+		addCiliumEniPermissions(p, resource, b.Cluster.Spec.IAM.Legacy)
+	}
+
+	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Calico != nil && (b.Cluster.Spec.Networking.Calico.CrossSubnet || b.Cluster.Spec.Networking.Calico.AWSSrcDstCheck != "") {
+		addCalicoSrcDstCheckPermissions(p)
+	}
+
+	return p, nil
+}
+
 // BuildAWSPolicy generates a custom policy for a Kubernetes master.
 func (r *NodeRoleMaster) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) {
 	resource := createResource(b)
@@ -544,7 +592,7 @@ func ReadableStatePaths(cluster *kops.Cluster, role Subject) ([]string, error) {
 	var paths []string
 
 	switch role.(type) {
-	case *NodeRoleMaster:
+	case *NodeRoleMaster, *NodeRoleAPIServer:
 		paths = append(paths, "/*")
 
 	case *NodeRoleNode:
diff --git a/pkg/model/iam/subject.go b/pkg/model/iam/subject.go
index 4da279ba22286..90d72adfd5f38 100644
--- a/pkg/model/iam/subject.go
+++ b/pkg/model/iam/subject.go
@@ -48,6 +48,15 @@ func (_ *NodeRoleMaster) ServiceAccount() (types.NamespacedName, bool) {
 	return types.NamespacedName{}, false
 }
 
+// NodeRoleAPIServer represents the role of API server-only nodes, and implements Subject.
+type NodeRoleAPIServer struct {
+}
+
+// ServiceAccount implements Subject.
+func (_ *NodeRoleAPIServer) ServiceAccount() (types.NamespacedName, bool) {
+	return types.NamespacedName{}, false
+}
+
 // NodeRoleNode represents the role of normal ("worker") nodes, and implements Subject.
 type NodeRoleNode struct {
 }
@@ -71,13 +80,12 @@ func BuildNodeRoleSubject(igRole kops.InstanceGroupRole) (Subject, error) {
 	switch igRole {
 	case kops.InstanceGroupRoleMaster:
 		return &NodeRoleMaster{}, nil
-
+	case kops.InstanceGroupRoleAPIServer:
+		return &NodeRoleAPIServer{}, nil
 	case kops.InstanceGroupRoleNode:
 		return &NodeRoleNode{}, nil
-
 	case kops.InstanceGroupRoleBastion:
 		return &NodeRoleBastion{}, nil
-
 	default:
 		return nil, fmt.Errorf("unknown instancegroup role %q", igRole)
 	}
diff --git a/pkg/model/names.go b/pkg/model/names.go
index 14e5bf60b4f92..2a05f05f5f277 100644
--- a/pkg/model/names.go
+++ b/pkg/model/names.go
@@ -36,7 +36,7 @@ func (b *KopsModelContext) SecurityGroupName(role kops.InstanceGroupRole) string
 		return "bastion." + b.ClusterName()
 	case kops.InstanceGroupRoleNode:
 		return "nodes." + b.ClusterName()
-	case kops.InstanceGroupRoleMaster:
+	case kops.InstanceGroupRoleMaster, kops.InstanceGroupRoleAPIServer:
 		return "masters." + b.ClusterName()
 	default:
 		klog.Fatalf("unknown role: %v", role)
@@ -58,6 +58,8 @@ func (b *KopsModelContext) AutoscalingGroupName(ig *kops.InstanceGroup) string {
 		// though the IG name suffices for uniqueness, and with sensible naming masters
 		// should be redundant...
 		return ig.ObjectMeta.Name + ".masters." + b.ClusterName()
+	case kops.InstanceGroupRoleAPIServer:
+		return ig.ObjectMeta.Name + ".apiservers." + b.ClusterName()
 	case kops.InstanceGroupRoleNode, kops.InstanceGroupRoleBastion:
 		return ig.ObjectMeta.Name + "." + b.ClusterName()
 
@@ -136,6 +138,8 @@ func (b *KopsModelContext) IAMName(role kops.InstanceGroupRole) string {
 	switch role {
 	case kops.InstanceGroupRoleMaster:
 		return "masters." + b.ClusterName()
+	case kops.InstanceGroupRoleAPIServer:
+		return "apiservers." + b.ClusterName()
 	case kops.InstanceGroupRoleBastion:
 		return "bastions." + b.ClusterName()
 	case kops.InstanceGroupRoleNode:
diff --git a/pkg/nodelabels/BUILD.bazel b/pkg/nodelabels/BUILD.bazel
index 9ba27bdfec4fe..2345a9e54e564 100644
--- a/pkg/nodelabels/BUILD.bazel
+++ b/pkg/nodelabels/BUILD.bazel
@@ -7,6 +7,7 @@ go_library(
     visibility = ["//visibility:public"],
    deps = [
         "//pkg/apis/kops:go_default_library",
+        "//pkg/featureflag:go_default_library",
         "//util/pkg/reflectutils:go_default_library",
     ],
 )
diff --git a/pkg/nodelabels/builder.go b/pkg/nodelabels/builder.go
index bf4e06a81f72d..1c16d250ede9f 100644
--- a/pkg/nodelabels/builder.go
+++ b/pkg/nodelabels/builder.go
@@ -18,17 +18,20 @@ package nodelabels
 
 import (
 	"k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/featureflag"
 	"k8s.io/kops/util/pkg/reflectutils"
 )
 
 const (
-	RoleLabelName15        = "kubernetes.io/role"
-	RoleLabelName16        = "kubernetes.io/role"
-	RoleMasterLabelValue15 = "master"
-	RoleNodeLabelValue15   = "node"
+	RoleLabelName15           = "kubernetes.io/role"
+	RoleLabelName16           = "kubernetes.io/role"
+	RoleMasterLabelValue15    = "master"
+	RoleAPIServerLabelValue15 = "api-server"
+	RoleNodeLabelValue15      = "node"
 
-	RoleLabelMaster16 = "node-role.kubernetes.io/master"
-	RoleLabelNode16   = "node-role.kubernetes.io/node"
+	RoleLabelMaster16    = "node-role.kubernetes.io/master"
+	RoleLabelAPIServer16 = "node-role.kubernetes.io/api-server"
+	RoleLabelNode16      = "node-role.kubernetes.io/node"
 
 	RoleLabelControlPlane20 = "node-role.kubernetes.io/control-plane"
 )
@@ -38,6 +41,8 @@ const (
 func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) map[string]string {
 	isControlPlane := instanceGroup.Spec.Role == kops.InstanceGroupRoleMaster
 
+	isAPIServer := instanceGroup.Spec.Role == kops.InstanceGroupRoleAPIServer
+
 	// Merge KubeletConfig for NodeLabels
 	c := &kops.KubeletConfigSpec{}
 	if isControlPlane {
@@ -52,13 +57,14 @@ func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) m
 
 	nodeLabels := c.NodeLabels
 
-	if isControlPlane {
+	if isAPIServer || isControlPlane {
 		if nodeLabels == nil {
 			nodeLabels = make(map[string]string)
 		}
-		for label, value := range BuildMandatoryControlPlaneLabels() {
-			nodeLabels[label] = value
+		if featureflag.APIServerNodes.Enabled() {
+			nodeLabels[RoleLabelAPIServer16] = ""
 		}
+		// For control-plane nodes, the mandatory control-plane labels added below override this value.
+		nodeLabels[RoleLabelName15] = RoleAPIServerLabelValue15
 	} else {
 		if nodeLabels == nil {
 			nodeLabels = make(map[string]string)
@@ -67,6 +73,15 @@ func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) m
 		nodeLabels[RoleLabelName15] = RoleNodeLabelValue15
 	}
 
+	if isControlPlane {
+		if nodeLabels == nil {
+			nodeLabels = make(map[string]string)
+		}
+		for label, value := range BuildMandatoryControlPlaneLabels() {
+			nodeLabels[label] = value
+		}
+	}
+
 	for k, v := range instanceGroup.Spec.NodeLabels {
 		if nodeLabels == nil {
 			nodeLabels = make(map[string]string)
diff --git a/pkg/nodelabels/builder_test.go b/pkg/nodelabels/builder_test.go
index a2151a944fa27..68be1bd1a193f 100644
--- a/pkg/nodelabels/builder_test.go
+++ b/pkg/nodelabels/builder_test.go
@@ -63,7 +63,8 @@ func TestBuildNodeLabels(t *testing.T) {
 		expected: map[string]string{
 			RoleLabelMaster16:       "",
 			RoleLabelControlPlane20: "",
-			RoleLabelName15:         RoleMasterLabelValue15,
+			// RoleLabelAPIServer16 is only expected when the APIServerNodes feature flag is enabled.
+			RoleLabelName15: RoleMasterLabelValue15,
 			"node.kubernetes.io/exclude-from-external-load-balancers": "",
 			"kops.k8s.io/kops-controller-pki":                         "",
 			"controlPlane1":                                           "controlPlane1",
diff --git a/pkg/validation/validate_cluster.go b/pkg/validation/validate_cluster.go
index dc66565fe64b7..1b3dd3e933a65 100644
---
a/pkg/validation/validate_cluster.go +++ b/pkg/validation/validate_cluster.go @@ -342,30 +342,21 @@ func (v *ValidationCluster) validateNodes(cloudGroups map[string]*cloudinstances readyNodes = append(readyNodes, *node) } - if n.Role == "master" { + switch n.Role { + case "master", "apiserver", "node": if !ready { v.addError(&ValidationError{ Kind: "Node", Name: node.Name, - Message: fmt.Sprintf("master %q is not ready", node.Name), + Message: fmt.Sprintf("node %q of role %q is not ready", node.Name, n.Role), InstanceGroup: cloudGroup.InstanceGroup, }) } v.Nodes = append(v.Nodes, n) - } else if n.Role == "node" { - if !ready { - v.addError(&ValidationError{ - Kind: "Node", - Name: node.Name, - Message: fmt.Sprintf("node %q is not ready", node.Name), - InstanceGroup: cloudGroup.InstanceGroup, - }) - } - - v.Nodes = append(v.Nodes, n) - } else { + default: klog.Warningf("ignoring node with role %q", n.Role) + } } } diff --git a/pkg/validation/validate_cluster_test.go b/pkg/validation/validate_cluster_test.go index f3ebf22324426..c4bf481f9d3cd 100644 --- a/pkg/validation/validate_cluster_test.go +++ b/pkg/validation/validate_cluster_test.go @@ -317,7 +317,7 @@ func Test_ValidateNodeNotReady(t *testing.T) { !assert.Equal(t, &ValidationError{ Kind: "Node", Name: "node-1b", - Message: "node \"node-1b\" is not ready", + Message: "node \"node-1b\" of role \"node\" is not ready", InstanceGroup: groups["node-1"].InstanceGroup, }, v.Failures[0]) { printDebug(t, v) @@ -425,7 +425,7 @@ func Test_ValidateMasterNotReady(t *testing.T) { !assert.Equal(t, &ValidationError{ Kind: "Node", Name: "master-1b", - Message: "master \"master-1b\" is not ready", + Message: "node \"master-1b\" of role \"master\" is not ready", InstanceGroup: groups["node-1"].InstanceGroup, }, v.Failures[0]) { printDebug(t, v) @@ -513,7 +513,7 @@ func Test_ValidateMasterStaticPods(t *testing.T) { { Kind: "Node", Name: "master-1c", - Message: "master \"master-1c\" is not ready", + Message: "node \"master-1c\" of role \"master\" is not ready", InstanceGroup: groups["node-1"].InstanceGroup, }, } diff --git a/tests/integration/update_cluster/apiservernodes/cloudformation.json b/tests/integration/update_cluster/apiservernodes/cloudformation.json new file mode 100644 index 0000000000000..ef650df94cf61 --- /dev/null +++ b/tests/integration/update_cluster/apiservernodes/cloudformation.json @@ -0,0 +1,1511 @@ +{ + "Resources": { + "AWSAutoScalingAutoScalingGroupapiserverapiserversminimalexamplecom": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "apiserver.apiservers.minimal.example.com", + "LaunchTemplate": { + "LaunchTemplateId": { + "Ref": "AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom" + }, + "Version": { + "Fn::GetAtt": [ + "AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom", + "LatestVersionNumber" + ] + } + }, + "MaxSize": "2", + "MinSize": "2", + "VPCZoneIdentifier": [ + { + "Ref": "AWSEC2Subnetustest1aminimalexamplecom" + } + ], + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com", + "PropagateAtLaunch": true + }, + { + "Key": "Name", + "Value": "apiserver.apiservers.minimal.example.com", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "api-server", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", + "Value": "", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/role/apiserver", + 
"Value": "1", + "PropagateAtLaunch": true + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "apiserver", + "PropagateAtLaunch": true + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned", + "PropagateAtLaunch": true + } + ], + "MetricsCollection": [ + { + "Granularity": "1Minute", + "Metrics": [ + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupMaxSize", + "GroupMinSize", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances" + ] + } + ] + } + }, + "AWSAutoScalingAutoScalingGroupmasterustest1amastersminimalexamplecom": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "master-us-test-1a.masters.minimal.example.com", + "LaunchTemplate": { + "LaunchTemplateId": { + "Ref": "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom" + }, + "Version": { + "Fn::GetAtt": [ + "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom", + "LatestVersionNumber" + ] + } + }, + "MaxSize": "1", + "MinSize": "1", + "VPCZoneIdentifier": [ + { + "Ref": "AWSEC2Subnetustest1aminimalexamplecom" + } + ], + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com", + "PropagateAtLaunch": true + }, + { + "Key": "Name", + "Value": "master-us-test-1a.masters.minimal.example.com", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", + "Value": "", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "master", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", + "Value": "", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", + "Value": "", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", + "Value": "", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", + "Value": "", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/role/master", + "Value": "1", + "PropagateAtLaunch": true + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "master-us-test-1a", + "PropagateAtLaunch": true + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned", + "PropagateAtLaunch": true + } + ], + "MetricsCollection": [ + { + "Granularity": "1Minute", + "Metrics": [ + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupMaxSize", + "GroupMinSize", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances" + ] + } + ] + } + }, + "AWSAutoScalingAutoScalingGroupnodesminimalexamplecom": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "nodes.minimal.example.com", + "LaunchTemplate": { + "LaunchTemplateId": { + "Ref": "AWSEC2LaunchTemplatenodesminimalexamplecom" + }, + "Version": { + "Fn::GetAtt": [ + "AWSEC2LaunchTemplatenodesminimalexamplecom", + "LatestVersionNumber" + ] + } + }, + "MaxSize": "2", + "MinSize": "2", + "VPCZoneIdentifier": [ + { + "Ref": "AWSEC2Subnetustest1aminimalexamplecom" + } + ], + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com", + "PropagateAtLaunch": true + }, + { + "Key": "Name", + "Value": "nodes.minimal.example.com", + 
"PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "node", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", + "Value": "", + "PropagateAtLaunch": true + }, + { + "Key": "k8s.io/role/node", + "Value": "1", + "PropagateAtLaunch": true + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "nodes", + "PropagateAtLaunch": true + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned", + "PropagateAtLaunch": true + } + ], + "MetricsCollection": [ + { + "Granularity": "1Minute", + "Metrics": [ + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupMaxSize", + "GroupMinSize", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances" + ] + } + ] + } + }, + "AWSEC2DHCPOptionsminimalexamplecom": { + "Type": "AWS::EC2::DHCPOptions", + "Properties": { + "DomainName": "us-test-1.compute.internal", + "DomainNameServers": [ + "AmazonProvidedDNS" + ], + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSEC2InternetGatewayminimalexamplecom": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom": { + "Type": "AWS::EC2::LaunchTemplate", + "Properties": { + "LaunchTemplateName": "apiserver.apiservers.minimal.example.com", + "LaunchTemplateData": { + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "VolumeType": "gp3", + "VolumeSize": 128, + "Iops": 3000, + "Throughput": 125, + "DeleteOnTermination": true, + "Encrypted": true + } + } + ], + "IamInstanceProfile": { + "Name": { + "Ref": "AWSIAMInstanceProfileapiserversminimalexamplecom" + } + }, + "ImageId": "ami-12345678", + "InstanceType": "t2.medium", + "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", + "MetadataOptions": { + "HttpPutResponseHopLimit": 1, + "HttpTokens": "optional" + }, + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": true, + "DeleteOnTermination": true, + "DeviceIndex": 0, + "Groups": [ + { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + } + ] + } + ], + "TagSpecifications": [ + { + "ResourceType": "instance", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "apiserver.apiservers.minimal.example.com" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "api-server" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", + "Value": "" + }, + { + "Key": "k8s.io/role/apiserver", + "Value": "1" + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "apiserver" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + }, + { + "ResourceType": "volume", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "apiserver.apiservers.minimal.example.com" + }, + { + "Key": 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "api-server" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", + "Value": "" + }, + { + "Key": "k8s.io/role/apiserver", + "Value": "1" + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "apiserver" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + ], + "UserData": "extracted" + } + } + }, + "AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom": { + "Type": "AWS::EC2::LaunchTemplate", + "Properties": { + "LaunchTemplateName": "master-us-test-1a.masters.minimal.example.com", + "LaunchTemplateData": { + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "VolumeType": "gp3", + "VolumeSize": 64, + "Iops": 3000, + "Throughput": 125, + "DeleteOnTermination": true, + "Encrypted": true + } + }, + { + "DeviceName": "/dev/sdc", + "VirtualName": "ephemeral0" + } + ], + "IamInstanceProfile": { + "Name": { + "Ref": "AWSIAMInstanceProfilemastersminimalexamplecom" + } + }, + "ImageId": "ami-12345678", + "InstanceType": "m3.medium", + "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", + "MetadataOptions": { + "HttpPutResponseHopLimit": 1, + "HttpTokens": "optional" + }, + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": true, + "DeleteOnTermination": true, + "DeviceIndex": 0, + "Groups": [ + { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + } + ] + } + ], + "TagSpecifications": [ + { + "ResourceType": "instance", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "master-us-test-1a.masters.minimal.example.com" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", + "Value": "" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "master" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", + "Value": "" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", + "Value": "" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", + "Value": "" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", + "Value": "" + }, + { + "Key": "k8s.io/role/master", + "Value": "1" + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "master-us-test-1a" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + }, + { + "ResourceType": "volume", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "master-us-test-1a.masters.minimal.example.com" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki", + "Value": "" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "master" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/api-server", + "Value": "" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane", + "Value": "" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master", + "Value": "" + }, + { + "Key": 
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers", + "Value": "" + }, + { + "Key": "k8s.io/role/master", + "Value": "1" + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "master-us-test-1a" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + ], + "UserData": "extracted" + } + } + }, + "AWSEC2LaunchTemplatenodesminimalexamplecom": { + "Type": "AWS::EC2::LaunchTemplate", + "Properties": { + "LaunchTemplateName": "nodes.minimal.example.com", + "LaunchTemplateData": { + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "VolumeType": "gp3", + "VolumeSize": 128, + "Iops": 3000, + "Throughput": 125, + "DeleteOnTermination": true, + "Encrypted": true + } + } + ], + "IamInstanceProfile": { + "Name": { + "Ref": "AWSIAMInstanceProfilenodesminimalexamplecom" + } + }, + "ImageId": "ami-12345678", + "InstanceType": "t2.medium", + "KeyName": "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57", + "MetadataOptions": { + "HttpPutResponseHopLimit": 1, + "HttpTokens": "optional" + }, + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": true, + "DeleteOnTermination": true, + "DeviceIndex": 0, + "Groups": [ + { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + } + ] + } + ], + "TagSpecifications": [ + { + "ResourceType": "instance", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "nodes.minimal.example.com" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "node" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", + "Value": "" + }, + { + "Key": "k8s.io/role/node", + "Value": "1" + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "nodes" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + }, + { + "ResourceType": "volume", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "nodes.minimal.example.com" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role", + "Value": "node" + }, + { + "Key": "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node", + "Value": "" + }, + { + "Key": "k8s.io/role/node", + "Value": "1" + }, + { + "Key": "kops.k8s.io/instancegroup", + "Value": "nodes" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + ], + "UserData": "extracted" + } + } + }, + "AWSEC2Route00000": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "AWSEC2RouteTableminimalexamplecom" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "AWSEC2InternetGatewayminimalexamplecom" + } + } + }, + "AWSEC2RouteTableminimalexamplecom": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "AWSEC2VPCminimalexamplecom" + }, + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + }, + { + "Key": "kubernetes.io/kops/role", + "Value": "public" + } + ] + } + }, + "AWSEC2SecurityGroupEgressfrommastersminimalexamplecomegressall0to000000": { + "Type": "AWS::EC2::SecurityGroupEgress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + 
"FromPort": 0, + "ToPort": 0, + "IpProtocol": "-1", + "CidrIp": "0.0.0.0/0" + } + }, + "AWSEC2SecurityGroupEgressfromnodesminimalexamplecomegressall0to000000": { + "Type": "AWS::EC2::SecurityGroupEgress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 0, + "ToPort": 0, + "IpProtocol": "-1", + "CidrIp": "0.0.0.0/0" + } + }, + "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22mastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "FromPort": 22, + "ToPort": 22, + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0" + } + }, + "AWSEC2SecurityGroupIngressfrom00000ingresstcp22to22nodesminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 22, + "ToPort": 22, + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0" + } + }, + "AWSEC2SecurityGroupIngressfrom00000ingresstcp443to443mastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "FromPort": 443, + "ToPort": 443, + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0" + } + }, + "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0mastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "FromPort": 0, + "ToPort": 0, + "IpProtocol": "-1" + } + }, + "AWSEC2SecurityGroupIngressfrommastersminimalexamplecomingressall0to0nodesminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "FromPort": 0, + "ToPort": 0, + "IpProtocol": "-1" + } + }, + "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressall0to0nodesminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 0, + "ToPort": 0, + "IpProtocol": "-1" + } + }, + "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp1to2379mastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 1, + "ToPort": 2379, + "IpProtocol": "tcp" + } + }, + "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp2382to4000mastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 2382, + "ToPort": 4000, + "IpProtocol": "tcp" + } + }, + "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingresstcp4003to65535mastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": 
"AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 4003, + "ToPort": 65535, + "IpProtocol": "tcp" + } + }, + "AWSEC2SecurityGroupIngressfromnodesminimalexamplecomingressudp1to65535mastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom" + }, + "SourceSecurityGroupId": { + "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" + }, + "FromPort": 1, + "ToPort": 65535, + "IpProtocol": "udp" + } + }, + "AWSEC2SecurityGroupmastersminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupName": "masters.minimal.example.com", + "VpcId": { + "Ref": "AWSEC2VPCminimalexamplecom" + }, + "GroupDescription": "Security group for masters", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "masters.minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSEC2SecurityGroupnodesminimalexamplecom": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupName": "nodes.minimal.example.com", + "VpcId": { + "Ref": "AWSEC2VPCminimalexamplecom" + }, + "GroupDescription": "Security group for nodes", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "nodes.minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSEC2SubnetRouteTableAssociationustest1aminimalexamplecom": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "SubnetId": { + "Ref": "AWSEC2Subnetustest1aminimalexamplecom" + }, + "RouteTableId": { + "Ref": "AWSEC2RouteTableminimalexamplecom" + } + } + }, + "AWSEC2Subnetustest1aminimalexamplecom": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "AWSEC2VPCminimalexamplecom" + }, + "CidrBlock": "172.20.32.0/19", + "AvailabilityZone": "us-test-1a", + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "us-test-1a.minimal.example.com" + }, + { + "Key": "SubnetType", + "Value": "Public" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "AWSEC2VPCDHCPOptionsAssociationminimalexamplecom": { + "Type": "AWS::EC2::VPCDHCPOptionsAssociation", + "Properties": { + "VpcId": { + "Ref": "AWSEC2VPCminimalexamplecom" + }, + "DhcpOptionsId": { + "Ref": "AWSEC2DHCPOptionsminimalexamplecom" + } + } + }, + "AWSEC2VPCGatewayAttachmentminimalexamplecom": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "AWSEC2VPCminimalexamplecom" + }, + "InternetGatewayId": { + "Ref": "AWSEC2InternetGatewayminimalexamplecom" + } + } + }, + "AWSEC2VPCminimalexamplecom": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "172.20.0.0/16", + "EnableDnsHostnames": true, + "EnableDnsSupport": true, + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSEC2Volumeustest1aetcdeventsminimalexamplecom": { + "Type": "AWS::EC2::Volume", + "Properties": { + "AvailabilityZone": "us-test-1a", + "Size": 20, + "VolumeType": "gp3", + "Iops": 3000, + "Throughput": 125, + "Encrypted": false, + "Tags": [ + { + 
"Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "us-test-1a.etcd-events.minimal.example.com" + }, + { + "Key": "k8s.io/etcd/events", + "Value": "us-test-1a/us-test-1a" + }, + { + "Key": "k8s.io/role/master", + "Value": "1" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSEC2Volumeustest1aetcdmainminimalexamplecom": { + "Type": "AWS::EC2::Volume", + "Properties": { + "AvailabilityZone": "us-test-1a", + "Size": 20, + "VolumeType": "gp3", + "Iops": 3000, + "Throughput": 125, + "Encrypted": false, + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "us-test-1a.etcd-main.minimal.example.com" + }, + { + "Key": "k8s.io/etcd/main", + "Value": "us-test-1a/us-test-1a" + }, + { + "Key": "k8s.io/role/master", + "Value": "1" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSIAMInstanceProfileapiserversminimalexamplecom": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": { + "InstanceProfileName": "apiservers.minimal.example.com", + "Roles": [ + { + "Ref": "AWSIAMRoleapiserversminimalexamplecom" + } + ] + } + }, + "AWSIAMInstanceProfilemastersminimalexamplecom": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": { + "InstanceProfileName": "masters.minimal.example.com", + "Roles": [ + { + "Ref": "AWSIAMRolemastersminimalexamplecom" + } + ] + } + }, + "AWSIAMInstanceProfilenodesminimalexamplecom": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": { + "InstanceProfileName": "nodes.minimal.example.com", + "Roles": [ + { + "Ref": "AWSIAMRolenodesminimalexamplecom" + } + ] + } + }, + "AWSIAMPolicyapiserversminimalexamplecom": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyName": "apiservers.minimal.example.com", + "Roles": [ + { + "Ref": "AWSIAMRoleapiserversminimalexamplecom" + } + ], + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DescribeVolumesModifications", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/KubernetesCluster": "minimal.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:ListServerCertificates", + "iam:GetServerCertificate" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + } + } + }, + 
"AWSIAMPolicymastersminimalexamplecom": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyName": "masters.minimal.example.com", + "Roles": [ + { + "Ref": "AWSIAMRolemastersminimalexamplecom" + } + ], + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DescribeVolumesModifications", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/KubernetesCluster": "minimal.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeLaunchTemplateVersions" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup" + ], + "Condition": { + "StringEquals": { + "autoscaling:ResourceTag/KubernetesCluster": "minimal.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:ListServerCertificates", + "iam:GetServerCertificate" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + 
"route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + } + } + }, + "AWSIAMPolicynodesminimalexamplecom": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyName": "nodes.minimal.example.com", + "Roles": [ + { + "Ref": "AWSIAMRolenodesminimalexamplecom" + } + ], + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + } + } + }, + "AWSIAMRoleapiserversminimalexamplecom": { + "Type": "AWS::IAM::Role", + "Properties": { + "RoleName": "apiservers.minimal.example.com", + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "apiservers.minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSIAMRolemastersminimalexamplecom": { + "Type": "AWS::IAM::Role", + "Properties": { + "RoleName": "masters.minimal.example.com", + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "masters.minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + }, + "AWSIAMRolenodesminimalexamplecom": { + "Type": "AWS::IAM::Role", + "Properties": { + "RoleName": "nodes.minimal.example.com", + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "Tags": [ + { + "Key": "KubernetesCluster", + "Value": "minimal.example.com" + }, + { + "Key": "Name", + "Value": "nodes.minimal.example.com" + }, + { + "Key": "kubernetes.io/cluster/minimal.example.com", + "Value": "owned" + } + ] + } + } + } +} diff --git a/tests/integration/update_cluster/apiservernodes/cloudformation.json.extracted.yaml b/tests/integration/update_cluster/apiservernodes/cloudformation.json.extracted.yaml new file mode 100644 index 0000000000000..9918c672d3d37 --- /dev/null +++ b/tests/integration/update_cluster/apiservernodes/cloudformation.json.extracted.yaml @@ -0,0 +1,799 @@ +Resources.AWSEC2LaunchTemplateapiserverapiserversminimalexamplecom.Properties.LaunchTemplateData.UserData: | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/nodeup + NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 + 
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/nodeup + NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + + export AWS_REGION=us-test-1 + + + + + sysctl -w net.ipv4.tcp_rmem='4096 12582912 16777216' || true + + + function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} + } + + # Retry a download until we get it. args: name, sha, url1, url2... + download-or-bust() { + local -r file="$1" + local -r hash="$2" + shift 2 + + urls=( $* ) + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + if [[ -n "${hash}" ]]; then + echo "== Downloaded ${url} (SHA1 = ${hash}) ==" + else + echo "== Downloaded ${url} ==" + fi + return + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done + } + + validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi + } + + function split-commas() { + echo $1 | tr "," "\n" + } + + function try-download-release() { + local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") ) + if [[ -n "${NODEUP_HASH:-}" ]]; then + local -r nodeup_hash="${NODEUP_HASH}" + else + # TODO: Remove? + echo "Downloading sha256 (not found in env)" + download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}" + local -r nodeup_hash=$(cat nodeup.sha256) + fi + + echo "Downloading nodeup (${nodeup_urls[@]})" + download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}" + + chmod +x nodeup + } + + function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + # In case of failure checking integrity of release, retry. + cd ${INSTALL_DIR}/bin + until try-download-release; do + sleep 15 + echo "Couldn't download release. Retrying..." 
+ done + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) + } + + #################################################################################### + + /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + + echo "== nodeup node config starting ==" + ensure-install-dir + + cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' + cloudConfig: + manageStorageClasses: true + containerRuntime: containerd + containerd: + configOverride: | + version = 2 + + [plugins] + + [plugins."io.containerd.grpc.v1.cri"] + + [plugins."io.containerd.grpc.v1.cri".containerd] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + logLevel: info + version: 1.4.3 + docker: + skipInstall: true + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + hostnameOverride: '@aws' + image: k8s.gcr.io/kube-proxy:v1.20.0 + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hostnameOverride: '@aws' + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nonMasqueradeCIDR: 100.64.0.0/10 + podManifestPath: /etc/kubernetes/manifests + + __EOF_CLUSTER_SPEC + + cat > conf/ig_spec.yaml << '__EOF_IG_SPEC' + {} + + __EOF_IG_SPEC + + cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' + Assets: + amd64: + - ff2422571c4c1e9696e367f5f25466b96fb6e501f28aed29f414b1524a52dea0@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/amd64/kubelet + - a5895007f331f08d2e082eb12458764949559f30bcc5beae26c38f3e2724262c@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 2697a342e3477c211ab48313e259fd7e32ad1f5ded19320e6a559f50a82bff3d@https://github.com/containerd/containerd/releases/download/v1.4.3/cri-containerd-cni-1.4.3-linux-amd64.tar.gz + arm64: + - 47ab6c4273fc3bb0cb8ec9517271d915890c5a6b0e54b2991e7a8fbbe77b06e4@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/arm64/kubelet + - 25e4465870c99167e6c466623ed8f05a1d20fbcb48cab6688109389b52d87623@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - 6e3f80e8451ecbe7b3559247721c3e226be6b228acaadee7e13683f80c20e81c@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.0.tgz + ClusterName: minimal.example.com + ConfigBase: memfs://clusters.example.com/minimal.example.com + InstanceGroupName: apiserver + InstanceGroupRole: APIServer + KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + 
enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hostnameOverride: '@aws' + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: api-server + node-role.kubernetes.io/api-server: "" + nonMasqueradeCIDR: 100.64.0.0/10 + podManifestPath: /etc/kubernetes/manifests + channels: + - memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml + staticManifests: + - key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml + + __EOF_KUBE_ENV + + download-release + echo "== nodeup node config done ==" +Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/nodeup + NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 + NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/nodeup + NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + + export AWS_REGION=us-test-1 + + + + + sysctl -w net.ipv4.tcp_rmem='4096 12582912 16777216' || true + + + function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} + } + + # Retry a download until we get it. args: name, sha, url1, url2... + download-or-bust() { + local -r file="$1" + local -r hash="$2" + shift 2 + + urls=( $* ) + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + if [[ -n "${hash}" ]]; then + echo "== Downloaded ${url} (SHA1 = ${hash}) ==" + else + echo "== Downloaded ${url} ==" + fi + return + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done + } + + validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi + } + + function split-commas() { + echo $1 | tr "," "\n" + } + + function try-download-release() { + local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") ) + if [[ -n "${NODEUP_HASH:-}" ]]; then + local -r nodeup_hash="${NODEUP_HASH}" + else + # TODO: Remove? + echo "Downloading sha256 (not found in env)" + download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}" + local -r nodeup_hash=$(cat nodeup.sha256) + fi + + echo "Downloading nodeup (${nodeup_urls[@]})" + download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}" + + chmod +x nodeup + } + + function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + # In case of failure checking integrity of release, retry. + cd ${INSTALL_DIR}/bin + until try-download-release; do + sleep 15 + echo "Couldn't download release. Retrying..." + done + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) + } + + #################################################################################### + + /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + + echo "== nodeup node config starting ==" + ensure-install-dir + + cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' + cloudConfig: + manageStorageClasses: true + containerRuntime: containerd + containerd: + configOverride: | + version = 2 + + [plugins] + + [plugins."io.containerd.grpc.v1.cri"] + + [plugins."io.containerd.grpc.v1.cri".containerd] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + logLevel: info + version: 1.4.3 + docker: + skipInstall: true + encryptionConfig: null + etcdClusters: + events: + version: 3.4.13 + main: + version: 3.4.13 + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - PersistentVolumeLabel + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - http://127.0.0.1:4001 + etcdServersOverrides: + - /events#http://127.0.0.1:4002 + image: k8s.gcr.io/kube-apiserver:v1.20.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + 
requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.minimal.example.com + serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: minimal.example.com + configureCloudRoutes: false + image: k8s.gcr.io/kube-controller-manager:v1.20.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + hostnameOverride: '@aws' + image: k8s.gcr.io/kube-proxy:v1.20.0 + logLevel: 2 + kubeScheduler: + image: k8s.gcr.io/kube-scheduler:v1.20.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hostnameOverride: '@aws' + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nonMasqueradeCIDR: 100.64.0.0/10 + podManifestPath: /etc/kubernetes/manifests + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hostnameOverride: '@aws' + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nonMasqueradeCIDR: 100.64.0.0/10 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: false + + __EOF_CLUSTER_SPEC + + cat > conf/ig_spec.yaml << '__EOF_IG_SPEC' + {} + + __EOF_IG_SPEC + + cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' + Assets: + amd64: + - ff2422571c4c1e9696e367f5f25466b96fb6e501f28aed29f414b1524a52dea0@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/amd64/kubelet + - a5895007f331f08d2e082eb12458764949559f30bcc5beae26c38f3e2724262c@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 2697a342e3477c211ab48313e259fd7e32ad1f5ded19320e6a559f50a82bff3d@https://github.com/containerd/containerd/releases/download/v1.4.3/cri-containerd-cni-1.4.3-linux-amd64.tar.gz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/protokube + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/channels + arm64: + - 
47ab6c4273fc3bb0cb8ec9517271d915890c5a6b0e54b2991e7a8fbbe77b06e4@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/arm64/kubelet + - 25e4465870c99167e6c466623ed8f05a1d20fbcb48cab6688109389b52d87623@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - 6e3f80e8451ecbe7b3559247721c3e226be6b228acaadee7e13683f80c20e81c@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.0.tgz + - 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/protokube + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/channels + ClusterName: minimal.example.com + ConfigBase: memfs://clusters.example.com/minimal.example.com + InstanceGroupName: master-us-test-1a + InstanceGroupRole: Master + KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hostnameOverride: '@aws' + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/api-server: "" + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + nonMasqueradeCIDR: 100.64.0.0/10 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: false + channels: + - memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml + etcdManifests: + - memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml + - memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml + staticManifests: + - key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml + + __EOF_KUBE_ENV + + download-release + echo "== nodeup node config done ==" +Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/nodeup + NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 + NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/nodeup + NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + + 
export AWS_REGION=us-test-1 + + + + + sysctl -w net.ipv4.tcp_rmem='4096 12582912 16777216' || true + + + function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} + } + + # Retry a download until we get it. args: name, sha, url1, url2... + download-or-bust() { + local -r file="$1" + local -r hash="$2" + shift 2 + + urls=( $* ) + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + if [[ -n "${hash}" ]]; then + echo "== Downloaded ${url} (SHA1 = ${hash}) ==" + else + echo "== Downloaded ${url} ==" + fi + return + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done + } + + validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi + } + + function split-commas() { + echo $1 | tr "," "\n" + } + + function try-download-release() { + local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") ) + if [[ -n "${NODEUP_HASH:-}" ]]; then + local -r nodeup_hash="${NODEUP_HASH}" + else + # TODO: Remove? + echo "Downloading sha256 (not found in env)" + download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}" + local -r nodeup_hash=$(cat nodeup.sha256) + fi + + echo "Downloading nodeup (${nodeup_urls[@]})" + download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}" + + chmod +x nodeup + } + + function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + # In case of failure checking integrity of release, retry. + cd ${INSTALL_DIR}/bin + until try-download-release; do + sleep 15 + echo "Couldn't download release. Retrying..." 
+ done + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) + } + + #################################################################################### + + /bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + + echo "== nodeup node config starting ==" + ensure-install-dir + + cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' + cloudConfig: + manageStorageClasses: true + containerRuntime: containerd + containerd: + configOverride: | + version = 2 + + [plugins] + + [plugins."io.containerd.grpc.v1.cri"] + + [plugins."io.containerd.grpc.v1.cri".containerd] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + logLevel: info + version: 1.4.3 + docker: + skipInstall: true + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + hostnameOverride: '@aws' + image: k8s.gcr.io/kube-proxy:v1.20.0 + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hostnameOverride: '@aws' + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nonMasqueradeCIDR: 100.64.0.0/10 + podManifestPath: /etc/kubernetes/manifests + + __EOF_CLUSTER_SPEC + + cat > conf/ig_spec.yaml << '__EOF_IG_SPEC' + {} + + __EOF_IG_SPEC + + cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' + Assets: + amd64: + - ff2422571c4c1e9696e367f5f25466b96fb6e501f28aed29f414b1524a52dea0@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/amd64/kubelet + - a5895007f331f08d2e082eb12458764949559f30bcc5beae26c38f3e2724262c@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 2697a342e3477c211ab48313e259fd7e32ad1f5ded19320e6a559f50a82bff3d@https://github.com/containerd/containerd/releases/download/v1.4.3/cri-containerd-cni-1.4.3-linux-amd64.tar.gz + arm64: + - 47ab6c4273fc3bb0cb8ec9517271d915890c5a6b0e54b2991e7a8fbbe77b06e4@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/arm64/kubelet + - 25e4465870c99167e6c466623ed8f05a1d20fbcb48cab6688109389b52d87623@https://storage.googleapis.com/kubernetes-release/release/v1.20.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - 6e3f80e8451ecbe7b3559247721c3e226be6b228acaadee7e13683f80c20e81c@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.0.tgz + ClusterName: minimal.example.com + ConfigBase: memfs://clusters.example.com/minimal.example.com + InstanceGroupName: nodes + InstanceGroupRole: Node + KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + 
enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hostnameOverride: '@aws' + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + nonMasqueradeCIDR: 100.64.0.0/10 + podManifestPath: /etc/kubernetes/manifests + channels: + - memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml + + __EOF_KUBE_ENV + + download-release + echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/apiservernodes/id_rsa.pub b/tests/integration/update_cluster/apiservernodes/id_rsa.pub new file mode 100755 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/apiservernodes/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/apiservernodes/in-v1alpha2.yaml b/tests/integration/update_cluster/apiservernodes/in-v1alpha2.yaml new file mode 100644 index 0000000000000..c56d7c2b80a76 --- /dev/null +++ b/tests/integration/update_cluster/apiservernodes/in-v1alpha2.yaml @@ -0,0 +1,99 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: minimal.example.com +spec: + kubernetesApiAccess: + - 0.0.0.0/0 + channel: stable + cloudProvider: aws + configBase: memfs://clusters.example.com/minimal.example.com + etcdClusters: + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + iam: {} + kubelet: + anonymousAuth: false + kubernetesVersion: v1.20.0 + masterInternalName: api.internal.minimal.example.com + masterPublicName: api.minimal.example.com + networkCIDR: 172.20.0.0/16 + networking: + cni: {} + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + topology: + masters: public + nodes: public + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: nodes + labels: + kops.k8s.io/cluster: minimal.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: t2.medium + maxSize: 2 + minSize: 2 + role: Node + subnets: + - us-test-1a + +--- + + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: apiserver + labels: + kops.k8s.io/cluster: minimal.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: t2.medium + maxSize: 2 + minSize: 2 + role: APIServer + subnets: + - us-test-1a + +--- + + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: master-us-test-1a + labels: + kops.k8s.io/cluster: minimal.example.com +spec: + associatePublicIp: true + image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1a diff --git a/upup/pkg/fi/cloudup/awsup/instancegroups.go b/upup/pkg/fi/cloudup/awsup/instancegroups.go index 
5bad5005b1778..d0b6a4495a0cb 100644 --- a/upup/pkg/fi/cloudup/awsup/instancegroups.go +++ b/upup/pkg/fi/cloudup/awsup/instancegroups.go @@ -31,6 +31,8 @@ func matchInstanceGroup(name string, clusterName string, instancegroups []*kops. switch g.Spec.Role { case kops.InstanceGroupRoleMaster: groupName = g.ObjectMeta.Name + ".masters." + clusterName + case kops.InstanceGroupRoleAPIServer: + groupName = g.ObjectMeta.Name + ".apiservers." + clusterName case kops.InstanceGroupRoleNode: groupName = g.ObjectMeta.Name + "." + clusterName case kops.InstanceGroupRoleBastion: diff --git a/upup/pkg/fi/cloudup/populate_instancegroup_spec.go b/upup/pkg/fi/cloudup/populate_instancegroup_spec.go index f9985d1b7fe7b..e7628da7fbd2a 100644 --- a/upup/pkg/fi/cloudup/populate_instancegroup_spec.go +++ b/upup/pkg/fi/cloudup/populate_instancegroup_spec.go @@ -25,6 +25,7 @@ import ( "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/apis/kops/util" "k8s.io/kops/pkg/apis/kops/validation" + "k8s.io/kops/pkg/featureflag" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/cloudup/awsup" "k8s.io/kops/upup/pkg/fi/cloudup/openstack" @@ -104,6 +105,9 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup, ig.Spec.MaxSize = fi.Int32(1) } } else { + if ig.IsAPIServerOnly() && !featureflag.APIServerNodes.Enabled() { + return nil, fmt.Errorf("apiserver nodes require the APIServerNodes feature flag to be enabled") + } if ig.Spec.MachineType == "" { ig.Spec.MachineType, err = defaultMachineType(cloud, cluster, ig) if err != nil { diff --git a/upup/pkg/fi/cloudup/template_functions.go b/upup/pkg/fi/cloudup/template_functions.go index c6137615ccfb2..6262f01dc97e8 100644 --- a/upup/pkg/fi/cloudup/template_functions.go +++ b/upup/pkg/fi/cloudup/template_functions.go @@ -452,7 +452,7 @@ func (tf *TemplateFunctions) KopsControllerConfig() (string, error) { case kops.CloudProviderAWS: nodesRoles := sets.String{} for _, ig := range tf.InstanceGroups { - if ig.Spec.Role == kops.InstanceGroupRoleNode { + if ig.Spec.Role == kops.InstanceGroupRoleNode || ig.Spec.Role == kops.InstanceGroupRoleAPIServer { profile, err := tf.LinkToIAMInstanceProfile(ig) if err != nil { return "", fmt.Errorf("getting profile for ig %s: %v", ig.Name, err)
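Reviewer note on the naming convention: the matchInstanceGroup hunk above gives dedicated apiserver groups their own ".apiservers." infix, parallel to ".masters." for control-plane groups, while plain node groups keep the bare "<name>.<cluster>" form. A minimal standalone sketch of that mapping follows; the helper and type names here are hypothetical (not the kops function itself), and the Bastion case is truncated in the hunk, so it is omitted.

```go
package main

import "fmt"

// Role stands in for kops.InstanceGroupRole in this sketch.
type Role string

const (
	RoleMaster    Role = "Master"
	RoleAPIServer Role = "APIServer"
	RoleNode      Role = "Node"
)

// groupName mirrors the switch in matchInstanceGroup for the roles visible
// in the diff: masters and the new dedicated apiserver groups get a
// role-specific infix; plain nodes do not.
func groupName(igName, clusterName string, role Role) (string, error) {
	switch role {
	case RoleMaster:
		return igName + ".masters." + clusterName, nil
	case RoleAPIServer:
		return igName + ".apiservers." + clusterName, nil
	case RoleNode:
		return igName + "." + clusterName, nil
	default:
		return "", fmt.Errorf("unhandled role %q", role)
	}
}

func main() {
	// With the apiservernodes fixture above, the "apiserver" instance group
	// in minimal.example.com maps to "apiserver.apiservers.minimal.example.com",
	// matching the AWSIAMInstanceProfileapiserversminimalexamplecom resources.
	name, _ := groupName("apiserver", "minimal.example.com", RoleAPIServer)
	fmt.Println(name)
}
```

This is also why the KopsControllerConfig hunk adds InstanceGroupRoleAPIServer alongside InstanceGroupRoleNode: apiserver-only instances bootstrap like worker nodes and need their instance profile registered with kops-controller.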