From b9212f85ad931cea2b8efc1ddd2452795802ad3a Mon Sep 17 00:00:00 2001 From: Ole Markus With Date: Fri, 11 Sep 2020 20:55:18 +0200 Subject: [PATCH] Add addon for aws node termination handler --- k8s/crds/kops.k8s.io_clusters.yaml | 16 ++ pkg/apis/kops/cluster.go | 3 + pkg/apis/kops/componentconfig.go | 16 ++ pkg/apis/kops/v1alpha2/cluster.go | 4 + pkg/apis/kops/v1alpha2/componentconfig.go | 16 ++ .../kops/v1alpha2/zz_generated.conversion.go | 54 +++++ .../kops/v1alpha2/zz_generated.deepcopy.go | 41 ++++ pkg/apis/kops/validation/validation.go | 11 + pkg/apis/kops/zz_generated.deepcopy.go | 41 ++++ pkg/model/components/BUILD.bazel | 1 + .../components/nodeterminationhandler.go | 52 +++++ upup/models/bindata.go | 197 ++++++++++++++++++ .../k8s-1.11.yaml.template | 176 ++++++++++++++++ .../pkg/fi/cloudup/bootstrapchannelbuilder.go | 22 ++ upup/pkg/fi/cloudup/populate_cluster_spec.go | 1 + 15 files changed, 651 insertions(+) create mode 100644 pkg/model/components/nodeterminationhandler.go create mode 100644 upup/models/cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index 9ed95dab375f9..7f0bda9dc20c7 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -2657,6 +2657,22 @@ spec: items: type: string type: array + nodeTerminationHandler: + description: NodeTerminationHandlerConfig determines the cluster autoscaler configuration. + properties: + enableScheduledEventDraining: + description: 'EnableScheduledEventDraining makes node termination handler drain nodes before the maintenance window starts for an EC2 instance scheduled event. Default: false' + type: boolean + enableSpotInterruptionDraining: + description: 'EnableSpotInterruptionDraining makes node termination handler drain nodes when spot interruption termination notice is received. 
Default: true' + type: boolean + enabled: + description: 'Enabled enables the node termination handler. Default: true' + type: boolean + prometheusEnable: + description: EnablePrometheusMetrics enables the "/metrics" endpoint. + type: boolean + type: object nonMasqueradeCIDR: description: MasterIPRange string `json:",omitempty"` NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live) It cannot overlap ServiceClusterIPRange type: string diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go index 215ce4e149766..9fbd531bba12c 100644 --- a/pkg/apis/kops/cluster.go +++ b/pkg/apis/kops/cluster.go @@ -158,6 +158,9 @@ type ClusterSpec struct { CloudConfig *CloudConfiguration `json:"cloudConfig,omitempty"` ExternalDNS *ExternalDNSConfig `json:"externalDns,omitempty"` + // NodeTerminationHandlerConfig determines the node termination handler configuration. + NodeTerminationHandler *NodeTerminationHandlerConfig `json:"nodeTerminationHandler,omitempty"` + // Networking configuration Networking *NetworkingSpec `json:"networking,omitempty"` // API field controls how the API is exposed outside the cluster diff --git a/pkg/apis/kops/componentconfig.go b/pkg/apis/kops/componentconfig.go index 1bfd615733280..afa46bd445ed6 100644 --- a/pkg/apis/kops/componentconfig.go +++ b/pkg/apis/kops/componentconfig.go @@ -759,6 +759,22 @@ type CloudConfiguration struct { Openstack *OpenstackConfiguration `json:"openstack,omitempty"` } +// NodeTerminationHandlerConfig determines the node termination handler configuration. +type NodeTerminationHandlerConfig struct { + // Enabled enables the node termination handler. + // Default: true + Enabled *bool `json:"enabled,omitempty"` + // EnableSpotInterruptionDraining makes node termination handler drain nodes when spot interruption termination notice is received. 
+ // Default: true + EnableSpotInterruptionDraining *bool `json:"enableSpotInterruptionDraining,omitempty"` + // EnableScheduledEventDraining makes node termination handler drain nodes before the maintenance window starts for an EC2 instance scheduled event. + // Default: false + EnableScheduledEventDraining *bool `json:"enableScheduledEventDraining,omitempty"` + + // EnablePrometheusMetrics enables the "/metrics" endpoint. + EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"` +} + // ClusterAutoscalerConfig determines the cluster autoscaler configuration. type ClusterAutoscalerConfig struct { // Enabled enables the cluster autoscaler. diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go index 6d5e0684b9d49..9bd1a9625d5a9 100644 --- a/pkg/apis/kops/v1alpha2/cluster.go +++ b/pkg/apis/kops/v1alpha2/cluster.go @@ -156,6 +156,10 @@ type ClusterSpec struct { MasterKubelet *KubeletConfigSpec `json:"masterKubelet,omitempty"` CloudConfig *CloudConfiguration `json:"cloudConfig,omitempty"` ExternalDNS *ExternalDNSConfig `json:"externalDns,omitempty"` + + // NodeTerminationHandlerConfig determines the node termination handler configuration. + NodeTerminationHandler *NodeTerminationHandlerConfig `json:"nodeTerminationHandler,omitempty"` + + // Networking configuration Networking *NetworkingSpec `json:"networking,omitempty"` // API field controls how the API is exposed outside the cluster diff --git a/pkg/apis/kops/v1alpha2/componentconfig.go b/pkg/apis/kops/v1alpha2/componentconfig.go index d3e2e2f042cc5..ed9bbe4dde35b 100644 --- a/pkg/apis/kops/v1alpha2/componentconfig.go +++ b/pkg/apis/kops/v1alpha2/componentconfig.go @@ -760,6 +760,22 @@ type CloudConfiguration struct { Openstack *OpenstackConfiguration `json:"openstack,omitempty"` } +// NodeTerminationHandlerConfig determines the node termination handler configuration. +type NodeTerminationHandlerConfig struct { + // Enabled enables the node termination handler. 
+ // Default: true + Enabled *bool `json:"enabled,omitempty"` + // EnableSpotInterruptionDraining makes node termination handler drain nodes when spot interruption termination notice is received. + // Default: true + EnableSpotInterruptionDraining *bool `json:"enableSpotInterruptionDraining,omitempty"` + // EnableScheduledEventDraining makes node termination handler drain nodes before the maintenance window starts for an EC2 instance scheduled event. + // Default: false + EnableScheduledEventDraining *bool `json:"enableScheduledEventDraining,omitempty"` + + // EnablePrometheusMetrics enables the "/metrics" endpoint. + EnablePrometheusMetrics *bool `json:"prometheusEnable,omitempty"` +} + // ClusterAutoscalerConfig determines the cluster autoscaler configuration. type ClusterAutoscalerConfig struct { // Enabled enables the cluster autoscaler. diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index 1ab4b3dae4601..a78f57a14e916 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -743,6 +743,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*NodeTerminationHandlerConfig)(nil), (*kops.NodeTerminationHandlerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NodeTerminationHandlerConfig_To_kops_NodeTerminationHandlerConfig(a.(*NodeTerminationHandlerConfig), b.(*kops.NodeTerminationHandlerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kops.NodeTerminationHandlerConfig)(nil), (*NodeTerminationHandlerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kops_NodeTerminationHandlerConfig_To_v1alpha2_NodeTerminationHandlerConfig(a.(*kops.NodeTerminationHandlerConfig), b.(*NodeTerminationHandlerConfig), scope) + }); err != nil { + return err + } 
if err := s.AddGeneratedConversionFunc((*OpenstackBlockStorageConfig)(nil), (*kops.OpenstackBlockStorageConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_OpenstackBlockStorageConfig_To_kops_OpenstackBlockStorageConfig(a.(*OpenstackBlockStorageConfig), b.(*kops.OpenstackBlockStorageConfig), scope) }); err != nil { @@ -2033,6 +2043,15 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out * } else { out.ExternalDNS = nil } + if in.NodeTerminationHandler != nil { + in, out := &in.NodeTerminationHandler, &out.NodeTerminationHandler + *out = new(kops.NodeTerminationHandlerConfig) + if err := Convert_v1alpha2_NodeTerminationHandlerConfig_To_kops_NodeTerminationHandlerConfig(*in, *out, s); err != nil { + return err + } + } else { + out.NodeTerminationHandler = nil + } if in.Networking != nil { in, out := &in.Networking, &out.Networking *out = new(kops.NetworkingSpec) @@ -2364,6 +2383,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, } else { out.ExternalDNS = nil } + if in.NodeTerminationHandler != nil { + in, out := &in.NodeTerminationHandler, &out.NodeTerminationHandler + *out = new(NodeTerminationHandlerConfig) + if err := Convert_kops_NodeTerminationHandlerConfig_To_v1alpha2_NodeTerminationHandlerConfig(*in, *out, s); err != nil { + return err + } + } else { + out.NodeTerminationHandler = nil + } if in.Networking != nil { in, out := &in.Networking, &out.Networking *out = new(NetworkingSpec) @@ -5056,6 +5084,32 @@ func Convert_kops_NodeLocalDNSConfig_To_v1alpha2_NodeLocalDNSConfig(in *kops.Nod return autoConvert_kops_NodeLocalDNSConfig_To_v1alpha2_NodeLocalDNSConfig(in, out, s) } +func autoConvert_v1alpha2_NodeTerminationHandlerConfig_To_kops_NodeTerminationHandlerConfig(in *NodeTerminationHandlerConfig, out *kops.NodeTerminationHandlerConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.EnableSpotInterruptionDraining = 
in.EnableSpotInterruptionDraining + out.EnableScheduledEventDraining = in.EnableScheduledEventDraining + out.EnablePrometheusMetrics = in.EnablePrometheusMetrics + return nil +} + +// Convert_v1alpha2_NodeTerminationHandlerConfig_To_kops_NodeTerminationHandlerConfig is an autogenerated conversion function. +func Convert_v1alpha2_NodeTerminationHandlerConfig_To_kops_NodeTerminationHandlerConfig(in *NodeTerminationHandlerConfig, out *kops.NodeTerminationHandlerConfig, s conversion.Scope) error { + return autoConvert_v1alpha2_NodeTerminationHandlerConfig_To_kops_NodeTerminationHandlerConfig(in, out, s) +} + +func autoConvert_kops_NodeTerminationHandlerConfig_To_v1alpha2_NodeTerminationHandlerConfig(in *kops.NodeTerminationHandlerConfig, out *NodeTerminationHandlerConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.EnableSpotInterruptionDraining = in.EnableSpotInterruptionDraining + out.EnableScheduledEventDraining = in.EnableScheduledEventDraining + out.EnablePrometheusMetrics = in.EnablePrometheusMetrics + return nil +} + +// Convert_kops_NodeTerminationHandlerConfig_To_v1alpha2_NodeTerminationHandlerConfig is an autogenerated conversion function. 
+func Convert_kops_NodeTerminationHandlerConfig_To_v1alpha2_NodeTerminationHandlerConfig(in *kops.NodeTerminationHandlerConfig, out *NodeTerminationHandlerConfig, s conversion.Scope) error { + return autoConvert_kops_NodeTerminationHandlerConfig_To_v1alpha2_NodeTerminationHandlerConfig(in, out, s) +} + func autoConvert_v1alpha2_OpenstackBlockStorageConfig_To_kops_OpenstackBlockStorageConfig(in *OpenstackBlockStorageConfig, out *kops.OpenstackBlockStorageConfig, s conversion.Scope) error { out.Version = in.Version out.IgnoreAZ = in.IgnoreAZ diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index 2f2f3291b612a..90bde7c02e033 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -836,6 +836,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(ExternalDNSConfig) (*in).DeepCopyInto(*out) } + if in.NodeTerminationHandler != nil { + in, out := &in.NodeTerminationHandler, &out.NodeTerminationHandler + *out = new(NodeTerminationHandlerConfig) + (*in).DeepCopyInto(*out) + } if in.Networking != nil { in, out := &in.Networking, &out.Networking *out = new(NetworkingSpec) @@ -3380,6 +3385,42 @@ func (in *NodeLocalDNSConfig) DeepCopy() *NodeLocalDNSConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeTerminationHandlerConfig) DeepCopyInto(out *NodeTerminationHandlerConfig) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EnableSpotInterruptionDraining != nil { + in, out := &in.EnableSpotInterruptionDraining, &out.EnableSpotInterruptionDraining + *out = new(bool) + **out = **in + } + if in.EnableScheduledEventDraining != nil { + in, out := &in.EnableScheduledEventDraining, &out.EnableScheduledEventDraining + *out = new(bool) + **out = **in + } + if in.EnablePrometheusMetrics != nil { + in, out := &in.EnablePrometheusMetrics, &out.EnablePrometheusMetrics + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeTerminationHandlerConfig. +func (in *NodeTerminationHandlerConfig) DeepCopy() *NodeTerminationHandlerConfig { + if in == nil { + return nil + } + out := new(NodeTerminationHandlerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OpenstackBlockStorageConfig) DeepCopyInto(out *OpenstackBlockStorageConfig) { *out = *in diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go index 20ce13adade66..b889156fa8f4d 100644 --- a/pkg/apis/kops/validation/validation.go +++ b/pkg/apis/kops/validation/validation.go @@ -154,6 +154,10 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie allErrs = append(allErrs, validateClusterAutoscaler(c, spec.ClusterAutoscaler, fieldPath.Child("clusterAutoscaler"))...) } + if spec.NodeTerminationHandler != nil { + allErrs = append(allErrs, validateNodeTerminationHandler(c, spec.NodeTerminationHandler, fieldPath.Child("nodeTerminationHandler"))...) 
+ } + // IAM additionalPolicies if spec.AdditionalPolicies != nil { for k, v := range *spec.AdditionalPolicies { @@ -1140,3 +1144,10 @@ func validateClusterAutoscaler(cluster *kops.Cluster, spec *kops.ClusterAutoscal return allErrs } + +func validateNodeTerminationHandler(cluster *kops.Cluster, spec *kops.NodeTerminationHandlerConfig, fldPath *field.Path) (allErrs field.ErrorList) { + if kops.CloudProviderID(cluster.Spec.CloudProvider) != kops.CloudProviderAWS { + allErrs = append(allErrs, field.Forbidden(fldPath, "Node Termination Handler supports only AWS")) + } + return allErrs +} diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index 22abfb9a8e6a8..e51f78ba67813 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -936,6 +936,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = new(ExternalDNSConfig) (*in).DeepCopyInto(*out) } + if in.NodeTerminationHandler != nil { + in, out := &in.NodeTerminationHandler, &out.NodeTerminationHandler + *out = new(NodeTerminationHandlerConfig) + (*in).DeepCopyInto(*out) + } if in.Networking != nil { in, out := &in.Networking, &out.Networking *out = new(NetworkingSpec) @@ -3578,6 +3583,42 @@ func (in *NodeLocalDNSConfig) DeepCopy() *NodeLocalDNSConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeTerminationHandlerConfig) DeepCopyInto(out *NodeTerminationHandlerConfig) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EnableSpotInterruptionDraining != nil { + in, out := &in.EnableSpotInterruptionDraining, &out.EnableSpotInterruptionDraining + *out = new(bool) + **out = **in + } + if in.EnableScheduledEventDraining != nil { + in, out := &in.EnableScheduledEventDraining, &out.EnableScheduledEventDraining + *out = new(bool) + **out = **in + } + if in.EnablePrometheusMetrics != nil { + in, out := &in.EnablePrometheusMetrics, &out.EnablePrometheusMetrics + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeTerminationHandlerConfig. +func (in *NodeTerminationHandlerConfig) DeepCopy() *NodeTerminationHandlerConfig { + if in == nil { + return nil + } + out := new(NodeTerminationHandlerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NoopStatusStore) DeepCopyInto(out *NoopStatusStore) { *out = *in diff --git a/pkg/model/components/BUILD.bazel b/pkg/model/components/BUILD.bazel index f489f3768498b..f60a7caab4939 100644 --- a/pkg/model/components/BUILD.bazel +++ b/pkg/model/components/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "kubeproxy.go", "kubescheduler.go", "networking.go", + "nodeterminationhandler.go", "openstack.go", ], importpath = "k8s.io/kops/pkg/model/components", diff --git a/pkg/model/components/nodeterminationhandler.go b/pkg/model/components/nodeterminationhandler.go new file mode 100644 index 0000000000000..8742828ec95c3 --- /dev/null +++ b/pkg/model/components/nodeterminationhandler.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package components + +import ( + "k8s.io/kops/pkg/apis/kops" + "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/loader" +) + +// NodeTerminationHandlerOptionsBuilder adds options for the node termination handler to the model. +type NodeTerminationHandlerOptionsBuilder struct { + *OptionsContext +} + +var _ loader.OptionsBuilder = &NodeTerminationHandlerOptionsBuilder{} + +func (b *NodeTerminationHandlerOptionsBuilder) BuildOptions(o interface{}) error { + clusterSpec := o.(*kops.ClusterSpec) + if clusterSpec.NodeTerminationHandler == nil { + return nil + } + nth := clusterSpec.NodeTerminationHandler + if nth.Enabled == nil { + nth.Enabled = fi.Bool(true) + } + if nth.EnableSpotInterruptionDraining == nil { + nth.EnableSpotInterruptionDraining = fi.Bool(true) + } + if nth.EnableScheduledEventDraining == nil { + nth.EnableScheduledEventDraining = fi.Bool(false) + } + + if nth.EnablePrometheusMetrics == nil { + nth.EnablePrometheusMetrics = fi.Bool(false) + } + return nil +} diff --git a/upup/models/bindata.go b/upup/models/bindata.go index 3113db4ee5b89..2eae3fcd721a6 100644 --- a/upup/models/bindata.go +++ b/upup/models/bindata.go @@ -51,6 +51,7 @@ // upup/models/cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template // upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template // upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template +// 
upup/models/cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template // upup/models/cloudup/resources/addons/nodelocaldns.addons.k8s.io/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template // upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template @@ -18513,6 +18514,198 @@ func cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate() (*asset return a, nil } +var _cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate = []byte(`{{ with .Components.NodeTerminationHandler }} +# Sourced from https://github.com/aws/aws-node-termination-handler/releases/download/v1.7.0/all-resources.yaml +--- +# Source: aws-node-termination-handler/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: aws-node-termination-handler + namespace: kube-system + labels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + k8s-app: aws-node-termination-handler + app.kubernetes.io/version: "1.7.0" +--- +# Source: aws-node-termination-handler/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aws-node-termination-handler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get +--- +# Source: aws-node-termination-handler/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aws-node-termination-handler +subjects: +- kind: ServiceAccount + name: aws-node-termination-handler + namespace: kube-system +roleRef: + kind: ClusterRole + name: 
aws-node-termination-handler + apiGroup: rbac.authorization.k8s.io +--- +# Source: aws-node-termination-handler/templates/daemonset.linux.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: aws-node-termination-handler + namespace: kube-system + labels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + k8s-app: aws-node-termination-handler + app.kubernetes.io/version: "1.7.0" +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + kubernetes.io/os: linux + template: + metadata: + labels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + k8s-app: aws-node-termination-handler + kubernetes.io/os: linux + spec: + volumes: + - name: "uptime" + hostPath: + path: "/proc/uptime" + priorityClassName: "system-node-critical" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "kubernetes.io/os" + operator: In + values: + - linux + - key: "kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - arm + serviceAccountName: aws-node-termination-handler + hostNetwork: true + dnsPolicy: "ClusterFirstWithHostNet" + containers: + - name: aws-node-termination-handler + image: amazon/aws-node-termination-handler:v1.7.0 + imagePullPolicy: IfNotPresent + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + allowPrivilegeEscalation: false + volumeMounts: + - name: "uptime" + mountPath: "/proc/uptime" + readOnly: true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace 
+ - name: SPOT_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DELETE_LOCAL_DATA + value: "true" + - name: IGNORE_DAEMON_SETS + value: "true" + - name: POD_TERMINATION_GRACE_PERIOD + value: "-1" + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: "{{ .EnableSpotInterruptionDraining }}" + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: "{{ .EnableScheduledEventDraining }}" + - name: JSON_LOGGING + value: "true" + - name: ENABLE_PROMETHEUS_SERVER + value: "{{ .EnablePrometheusMetrics }}" + resources: + limits: + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi + nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists +{{ end }}`) + +func cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate, nil +} + +func cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplateBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate = []byte(`# Vendored from https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml --- @@ -20486,6 +20679,7 @@ var _bindata = map[string]func() (*asset, error){ "cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template": cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplate, "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template": cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplate, "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template": 
cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate, + "cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template": cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate, "cloudup/resources/addons/nodelocaldns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate, "cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template": cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate, "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template": cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplate, @@ -20641,6 +20835,9 @@ var _bintree = &bintree{nil, map[string]*bintree{ "k8s-1.10.yaml.template": {cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplate, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, }}, + "node-termination-handler.aws": {nil, map[string]*bintree{ + "k8s-1.11.yaml.template": {cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate, map[string]*bintree{}}, + }}, "nodelocaldns.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.12.yaml.template": {cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, }}, diff --git a/upup/models/cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template b/upup/models/cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template new file mode 100644 index 0000000000000..bf9d047a5d1c2 --- /dev/null +++ b/upup/models/cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template @@ -0,0 +1,176 @@ +{{ with .Components.NodeTerminationHandler }} +# Sourced from https://github.com/aws/aws-node-termination-handler/releases/download/v1.7.0/all-resources.yaml +--- +# Source: aws-node-termination-handler/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + 
name: aws-node-termination-handler + namespace: kube-system + labels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + k8s-app: aws-node-termination-handler + app.kubernetes.io/version: "1.7.0" +--- +# Source: aws-node-termination-handler/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aws-node-termination-handler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get +--- +# Source: aws-node-termination-handler/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aws-node-termination-handler +subjects: +- kind: ServiceAccount + name: aws-node-termination-handler + namespace: kube-system +roleRef: + kind: ClusterRole + name: aws-node-termination-handler + apiGroup: rbac.authorization.k8s.io +--- +# Source: aws-node-termination-handler/templates/daemonset.linux.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: aws-node-termination-handler + namespace: kube-system + labels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + k8s-app: aws-node-termination-handler + app.kubernetes.io/version: "1.7.0" +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + kubernetes.io/os: linux + template: + metadata: + labels: + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/instance: aws-node-termination-handler + 
k8s-app: aws-node-termination-handler + kubernetes.io/os: linux + spec: + volumes: + - name: "uptime" + hostPath: + path: "/proc/uptime" + priorityClassName: "system-node-critical" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "kubernetes.io/os" + operator: In + values: + - linux + - key: "kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - arm + serviceAccountName: aws-node-termination-handler + hostNetwork: true + dnsPolicy: "ClusterFirstWithHostNet" + containers: + - name: aws-node-termination-handler + image: amazon/aws-node-termination-handler:v1.7.0 + imagePullPolicy: IfNotPresent + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + allowPrivilegeEscalation: false + volumeMounts: + - name: "uptime" + mountPath: "/proc/uptime" + readOnly: true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SPOT_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DELETE_LOCAL_DATA + value: "true" + - name: IGNORE_DAEMON_SETS + value: "true" + - name: POD_TERMINATION_GRACE_PERIOD + value: "-1" + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: "{{ .EnableSpotInterruptionDraining }}" + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: "{{ .EnableScheduledEventDraining }}" + - name: JSON_LOGGING + value: "true" + - name: ENABLE_PROMETHEUS_SERVER + value: "{{ .EnablePrometheusMetrics }}" + resources: + limits: + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi + nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists +{{ end }} \ No newline at end of file diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index a35973d81a82f..b2ad6a5d556a2 
100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -563,6 +563,28 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann } } + nth := b.Cluster.Spec.NodeTerminationHandler + + if nth != nil && fi.BoolValue(nth.Enabled) { + + key := "node-termination-handler.aws" + version := "1.7.0" + + { + location := key + "/k8s-1.11.yaml" + id := "k8s-1.11" + + addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + KubernetesVersion: ">=1.11.0", + Id: id, + }) + } + } + if kops.CloudProviderID(b.Cluster.Spec.CloudProvider) == kops.CloudProviderAWS { key := "storage-aws.addons.k8s.io" version := "1.15.0" diff --git a/upup/pkg/fi/cloudup/populate_cluster_spec.go b/upup/pkg/fi/cloudup/populate_cluster_spec.go index 032b76502f6df..7553d7102d501 100644 --- a/upup/pkg/fi/cloudup/populate_cluster_spec.go +++ b/upup/pkg/fi/cloudup/populate_cluster_spec.go @@ -279,6 +279,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error { codeModels = append(codeModels, &components.OpenStackOptionsBulder{Context: optionsContext}) codeModels = append(codeModels, &components.DiscoveryOptionsBuilder{OptionsContext: optionsContext}) codeModels = append(codeModels, &components.ClusterAutoscalerOptionsBuilder{OptionsContext: optionsContext}) + codeModels = append(codeModels, &components.NodeTerminationHandlerOptionsBuilder{OptionsContext: optionsContext}) } }