From 03e5826575fd2c33284a280a5f1d7b0d32dd1e83 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 15:56:52 -0500 Subject: [PATCH 01/13] clear-containers: add runtime to api and pass through parameters Signed-off-by: Jess Frazelle --- parts/k8s/kubernetesmastervars.t | 2 +- parts/k8s/kubernetesparams.t | 11 ++++++++++ pkg/acsengine/const.go | 2 ++ pkg/acsengine/defaults.go | 3 +++ pkg/acsengine/engine.go | 1 + pkg/api/convertertoapi.go | 1 + pkg/api/types.go | 1 + pkg/api/vlabs/const.go | 3 ++- pkg/api/vlabs/types.go | 1 + pkg/api/vlabs/validate.go | 35 +++++++++++++++++++++++++++++++ pkg/api/vlabs/validate_test.go | 36 ++++++++++++++++++++++++++++++++ 11 files changed, 94 insertions(+), 2 deletions(-) diff --git a/parts/k8s/kubernetesmastervars.t b/parts/k8s/kubernetesmastervars.t index bed18bb84b..ef9b447d2b 100644 --- a/parts/k8s/kubernetesmastervars.t +++ b/parts/k8s/kubernetesmastervars.t @@ -171,7 +171,7 @@ {{end}} "provisionScript": "{{GetKubernetesB64Provision}}", "mountetcdScript": "{{GetKubernetesB64Mountetcd}}", - "provisionScriptParametersCommon": "[concat('TENANT_ID=',variables('tenantID'),' APISERVER_PUBLIC_KEY=',variables('apiserverCertificate'),' SUBSCRIPTION_ID=',variables('subscriptionId'),' RESOURCE_GROUP=',variables('resourceGroup'),' LOCATION=',variables('location'),' SUBNET=',variables('subnetName'),' NETWORK_SECURITY_GROUP=',variables('nsgName'),' VIRTUAL_NETWORK=',variables('virtualNetworkName'),' VIRTUAL_NETWORK_RESOURCE_GROUP=',variables('virtualNetworkResourceGroupName'),' ROUTE_TABLE=',variables('routeTableName'),' PRIMARY_AVAILABILITY_SET=',variables('primaryAvailabilitySetName'),' SERVICE_PRINCIPAL_CLIENT_ID=',variables('servicePrincipalClientId'),' SERVICE_PRINCIPAL_CLIENT_SECRET=',variables('servicePrincipalClientSecret'),' KUBELET_PRIVATE_KEY=',variables('clientPrivateKey'),' TARGET_ENVIRONMENT=',variables('targetEnvironment'),' NETWORK_POLICY=',variables('networkPolicy'),' 
FQDNSuffix=',variables('fqdnEndpointSuffix'),' VNET_CNI_PLUGINS_URL=',variables('vnetCniLinuxPluginsURL'),' CNI_PLUGINS_URL=',variables('cniPluginsURL'),' MAX_PODS=',variables('maxPods'),' CLOUDPROVIDER_BACKOFF=',variables('cloudProviderBackoff'),' CLOUDPROVIDER_BACKOFF_RETRIES=',variables('cloudProviderBackoffRetries'),' CLOUDPROVIDER_BACKOFF_EXPONENT=',variables('cloudProviderBackoffExponent'),' CLOUDPROVIDER_BACKOFF_DURATION=',variables('cloudProviderBackoffDuration'),' CLOUDPROVIDER_BACKOFF_JITTER=',variables('cloudProviderBackoffJitter'),' CLOUDPROVIDER_RATELIMIT=',variables('cloudProviderRatelimit'),' CLOUDPROVIDER_RATELIMIT_QPS=',variables('cloudProviderRatelimitQPS'),' CLOUDPROVIDER_RATELIMIT_BUCKET=',variables('cloudProviderRatelimitBucket'),' USE_MANAGED_IDENTITY_EXTENSION=',variables('useManagedIdentityExtension'),' USE_INSTANCE_METADATA=',variables('useInstanceMetadata'))]", + "provisionScriptParametersCommon": "[concat('TENANT_ID=',variables('tenantID'),' APISERVER_PUBLIC_KEY=',variables('apiserverCertificate'),' SUBSCRIPTION_ID=',variables('subscriptionId'),' RESOURCE_GROUP=',variables('resourceGroup'),' LOCATION=',variables('location'),' SUBNET=',variables('subnetName'),' NETWORK_SECURITY_GROUP=',variables('nsgName'),' VIRTUAL_NETWORK=',variables('virtualNetworkName'),' VIRTUAL_NETWORK_RESOURCE_GROUP=',variables('virtualNetworkResourceGroupName'),' ROUTE_TABLE=',variables('routeTableName'),' PRIMARY_AVAILABILITY_SET=',variables('primaryAvailabilitySetName'),' SERVICE_PRINCIPAL_CLIENT_ID=',variables('servicePrincipalClientId'),' SERVICE_PRINCIPAL_CLIENT_SECRET=',variables('servicePrincipalClientSecret'),' KUBELET_PRIVATE_KEY=',variables('clientPrivateKey'),' TARGET_ENVIRONMENT=',variables('targetEnvironment'),' NETWORK_POLICY=',variables('networkPolicy'),' FQDNSuffix=',variables('fqdnEndpointSuffix'),' VNET_CNI_PLUGINS_URL=',variables('vnetCniLinuxPluginsURL'),' CNI_PLUGINS_URL=',variables('cniPluginsURL'),' MAX_PODS=',variables('maxPods'),' 
CLOUDPROVIDER_BACKOFF=',variables('cloudProviderBackoff'),' CLOUDPROVIDER_BACKOFF_RETRIES=',variables('cloudProviderBackoffRetries'),' CLOUDPROVIDER_BACKOFF_EXPONENT=',variables('cloudProviderBackoffExponent'),' CLOUDPROVIDER_BACKOFF_DURATION=',variables('cloudProviderBackoffDuration'),' CLOUDPROVIDER_BACKOFF_JITTER=',variables('cloudProviderBackoffJitter'),' CLOUDPROVIDER_RATELIMIT=',variables('cloudProviderRatelimit'),' CLOUDPROVIDER_RATELIMIT_QPS=',variables('cloudProviderRatelimitQPS'),' CLOUDPROVIDER_RATELIMIT_BUCKET=',variables('cloudProviderRatelimitBucket'),' USE_MANAGED_IDENTITY_EXTENSION=',variables('useManagedIdentityExtension'),' USE_INSTANCE_METADATA=',variables('useInstanceMetadata'),' CONTAINER_RUNTIME=',variables('containerRuntime'))]", {{if not IsHostedMaster}} "provisionScriptParametersMaster": "[concat('APISERVER_PRIVATE_KEY=',variables('apiServerPrivateKey'),' CA_CERTIFICATE=',variables('caCertificate'),' CA_PRIVATE_KEY=',variables('caPrivateKey'),' MASTER_FQDN=',variables('masterFqdnPrefix'),' KUBECONFIG_CERTIFICATE=',variables('kubeConfigCertificate'),' KUBECONFIG_KEY=',variables('kubeConfigPrivateKey'),' ETCD_SERVER_CERTIFICATE=',variables('etcdServerCertificate'),' ETCD_CLIENT_CERTIFICATE=',variables('etcdClientCertificate'),' ETCD_SERVER_PRIVATE_KEY=',variables('etcdServerPrivateKey'),' ETCD_CLIENT_PRIVATE_KEY=',variables('etcdClientPrivateKey'),' ETCD_PEER_CERTIFICATES=',string(variables('etcdPeerCertificates')),' ETCD_PEER_PRIVATE_KEYS=',string(variables('etcdPeerPrivateKeys')),' ADMINUSER=',variables('username'))]", diff --git a/parts/k8s/kubernetesparams.t b/parts/k8s/kubernetesparams.t index fe693177d1..e29a09e074 100644 --- a/parts/k8s/kubernetesparams.t +++ b/parts/k8s/kubernetesparams.t @@ -506,6 +506,17 @@ ], "type": "string" }, + "containerRuntime": { + "defaultValue": "{{.OrchestratorProfile.KubernetesConfig.ContainerRuntime}}", + "metadata": { + "description": "The container runtime to use (docker|clear-containers)" + }, + 
"allowedValues": [ + "docker", + "clear-containers" + ], + "type": "string" + }, "cniPluginsURL": { "defaultValue": "https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-latest.tgz", "type": "string" diff --git a/pkg/acsengine/const.go b/pkg/acsengine/const.go index f9a7be62f6..e780cd265b 100644 --- a/pkg/acsengine/const.go +++ b/pkg/acsengine/const.go @@ -44,6 +44,8 @@ const ( DefaultNetworkPolicy = NetworkPolicyNone // DefaultNetworkPolicyWindows defines the network policy to use by default for clusters with Windows agent pools DefaultNetworkPolicyWindows = NetworkPolicyNone + // DefaultContainerRuntime is docker + DefaultContainerRuntime = "docker" // DefaultKubernetesNodeStatusUpdateFrequency is 10s, see --node-status-update-frequency at https://kubernetes.io/docs/admin/kubelet/ DefaultKubernetesNodeStatusUpdateFrequency = "10s" // DefaultKubernetesHardEvictionThreshold is memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%, see --eviction-hard at https://kubernetes.io/docs/admin/kubelet/ diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index 596590d6ad..cd9c640778 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -315,6 +315,9 @@ func setOrchestratorDefaults(cs *api.ContainerService) { o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy } } + if o.KubernetesConfig.ContainerRuntime == "" { + o.KubernetesConfig.ContainerRuntime = DefaultContainerRuntime + } if o.KubernetesConfig.ClusterSubnet == "" { if o.IsAzureCNI() { // When VNET integration is enabled, all masters, agents and pods share the same large subnet. 
diff --git a/pkg/acsengine/engine.go b/pkg/acsengine/engine.go index 3c42bd8bb0..a5a2945646 100644 --- a/pkg/acsengine/engine.go +++ b/pkg/acsengine/engine.go @@ -612,6 +612,7 @@ func getParameters(cs *api.ContainerService, isClassicMode bool, generatorCode s } addValue(parametersMap, "dockerBridgeCidr", properties.OrchestratorProfile.KubernetesConfig.DockerBridgeSubnet) addValue(parametersMap, "networkPolicy", properties.OrchestratorProfile.KubernetesConfig.NetworkPolicy) + addValue(parametersMap, "containerRuntime", properties.OrchestratorProfile.KubernetesConfig.ContainerRuntime) addValue(parametersMap, "cniPluginsURL", cloudSpecConfig.KubernetesSpecConfig.CNIPluginsDownloadURL) addValue(parametersMap, "vnetCniLinuxPluginsURL", cloudSpecConfig.KubernetesSpecConfig.VnetCNILinuxPluginsDownloadURL) addValue(parametersMap, "vnetCniWindowsPluginsURL", cloudSpecConfig.KubernetesSpecConfig.VnetCNIWindowsPluginsDownloadURL) diff --git a/pkg/api/convertertoapi.go b/pkg/api/convertertoapi.go index 3f54f8c729..025a4f22f8 100644 --- a/pkg/api/convertertoapi.go +++ b/pkg/api/convertertoapi.go @@ -593,6 +593,7 @@ func convertVLabsKubernetesConfig(vlabs *vlabs.KubernetesConfig, api *Kubernetes api.DNSServiceIP = vlabs.DNSServiceIP api.ServiceCIDR = vlabs.ServiceCidr api.NetworkPolicy = vlabs.NetworkPolicy + api.ContainerRuntime = vlabs.ContainerRuntime api.MaxPods = vlabs.MaxPods api.DockerBridgeSubnet = vlabs.DockerBridgeSubnet api.CloudProviderBackoff = vlabs.CloudProviderBackoff diff --git a/pkg/api/types.go b/pkg/api/types.go index e2193508cb..4a137a55da 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -223,6 +223,7 @@ type KubernetesConfig struct { KubernetesImageBase string `json:"kubernetesImageBase,omitempty"` ClusterSubnet string `json:"clusterSubnet,omitempty"` NetworkPolicy string `json:"networkPolicy,omitempty"` + ContainerRuntime string `json:"containerRuntime,omitempty"` MaxPods int `json:"maxPods,omitempty"` DockerBridgeSubnet string 
`json:"dockerBridgeSubnet,omitempty"` DNSServiceIP string `json:"dnsServiceIP,omitempty"` diff --git a/pkg/api/vlabs/const.go b/pkg/api/vlabs/const.go index aa3cbb3bc8..f76c80f48c 100644 --- a/pkg/api/vlabs/const.go +++ b/pkg/api/vlabs/const.go @@ -68,9 +68,10 @@ const ( ManagedDisks = "ManagedDisks" ) -// Network policy var ( NetworkPolicyValues = [...]string{"", "none", "azure", "calico"} + + ContainerRuntimeValues = [...]string{"", "docker", "clear-containers"} ) // Kubernetes configuration diff --git a/pkg/api/vlabs/types.go b/pkg/api/vlabs/types.go index 7bad4798f8..ac1b66b9e7 100644 --- a/pkg/api/vlabs/types.go +++ b/pkg/api/vlabs/types.go @@ -233,6 +233,7 @@ type KubernetesConfig struct { DNSServiceIP string `json:"dnsServiceIP,omitempty"` ServiceCidr string `json:"serviceCidr,omitempty"` NetworkPolicy string `json:"networkPolicy,omitempty"` + ContainerRuntime string `json:"containerRuntime,omitempty"` MaxPods int `json:"maxPods,omitempty"` DockerBridgeSubnet string `json:"dockerBridgeSubnet,omitempty"` CloudProviderConfig diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 5ddcd18a47..5af23ac37b 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -315,6 +315,9 @@ func (a *Properties) Validate(isUpdate bool) error { if e := a.validateNetworkPolicy(); e != nil { return e } + if e := a.validateContainerRuntime(); e != nil { + return e + } if e := a.MasterProfile.Validate(); e != nil { return e } @@ -672,6 +675,38 @@ func (a *Properties) validateNetworkPolicy() error { return nil } +func (a *Properties) validateContainerRuntime() error { + var containerRuntime string + + switch a.OrchestratorProfile.OrchestratorType { + case Kubernetes: + if a.OrchestratorProfile.KubernetesConfig != nil { + containerRuntime = a.OrchestratorProfile.KubernetesConfig.ContainerRuntime + } + default: + return nil + } + + // Check ContainerRuntime has a valid value. 
+ valid := false + for _, runtime := range ContainerRuntimeValues { + if containerRuntime == runtime { + valid = true + break + } + } + if !valid { + return fmt.Errorf("unknown containerRuntime %q specified", containerRuntime) + } + + // Make sure we don't use clear containers on windows. + if containerRuntime == "clear-containers" && a.HasWindows() { + return fmt.Errorf("containerRuntime %q is not supporting windows agents", containerRuntime) + } + + return nil +} + func validateName(name string, label string) error { if name == "" { return fmt.Errorf("%s must be a non-empty value", label) diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index 0e92f5cc3f..893cc873b6 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -512,3 +512,39 @@ func getK8sDefaultProperties() *Properties { }, } } + +func Test_Properties_ValidateContainerRuntime(t *testing.T) { + p := &Properties{} + p.OrchestratorProfile = &OrchestratorProfile{} + p.OrchestratorProfile.OrchestratorType = Kubernetes + + for _, policy := range ContainerRuntimeValues { + p.OrchestratorProfile.KubernetesConfig = &KubernetesConfig{} + p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = policy + if err := p.validateContainerRuntime(); err != nil { + t.Errorf( + "should not error on containerRuntime=\"%s\"", + policy, + ) + } + } + + p.OrchestratorProfile.KubernetesConfig.ContainerRuntime = "not-existing" + if err := p.validateContainerRuntime(); err == nil { + t.Errorf( + "should error on invalid containerRuntime", + ) + } + + p.OrchestratorProfile.KubernetesConfig.ContainerRuntime = "clear-containers" + p.AgentPoolProfiles = []*AgentPoolProfile{ + { + OSType: Windows, + }, + } + if err := p.validateContainerRuntime(); err == nil { + t.Errorf( + "should error on clear-containers for windows clusters", + ) + } +} From d78f2b28cfe302d0b687752a9d9d9eab2ae88341 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 16:13:32 -0500 Subject: 
[PATCH 02/13] clear-containers: add scripts Signed-off-by: Jess Frazelle --- parts/k8s/artifacts/kuberneteskubelet.service | 1 + parts/k8s/kubernetesagentcustomdata.yml | 14 +- parts/k8s/kubernetesmastercustomdata.yml | 1 + parts/k8s/kubernetesmastercustomscript.sh | 718 +++++++++++------- 4 files changed, 469 insertions(+), 265 deletions(-) diff --git a/parts/k8s/artifacts/kuberneteskubelet.service b/parts/k8s/artifacts/kuberneteskubelet.service index cf84a3136c..a437e56565 100644 --- a/parts/k8s/artifacts/kuberneteskubelet.service +++ b/parts/k8s/artifacts/kuberneteskubelet.service @@ -25,6 +25,7 @@ ExecStart=/usr/bin/docker run \ --volume=/sys:/sys:ro \ --volume=/var/run:/var/run:rw \ --volume=/var/lib/docker/:/var/lib/docker:rw \ + --volume=/var/lib/containers/:/var/lib/containers:rw \ --volume=/var/lib/kubelet/:/var/lib/kubelet:shared \ --volume=/var/log:/var/log:rw \ --volume=/etc/kubernetes/:/etc/kubernetes:ro \ diff --git a/parts/k8s/kubernetesagentcustomdata.yml b/parts/k8s/kubernetesagentcustomdata.yml index 6bf37aab6d..629a5b8dc8 100644 --- a/parts/k8s/kubernetesagentcustomdata.yml +++ b/parts/k8s/kubernetesagentcustomdata.yml @@ -91,7 +91,7 @@ write_files: {{if .IsCoreOS}} ExecStartPre=/bin/mv /tmp/kubectldir/hyperkube /opt/kubectl ExecStart=/bin/chmod a+x /opt/kubectl -{{else}} +{{else}} ExecStartPre=/bin/mv /tmp/kubectldir/hyperkube /usr/local/bin/kubectl ExecStart=/bin/chmod a+x /usr/local/bin/kubectl {{end}} @@ -158,20 +158,20 @@ coreos: command: "start" content: | # Note: Initiated as a service since there is no runcmd within CoreOS on cloud-config/Ignition - [Unit] + [Unit] Description=Start provision setup service [Service] ExecStart=/opt/azure/containers/provision-setup.sh {{else}} runcmd: -- echo `date`,`hostname`, startruncmd>>/opt/m +- echo `date`,`hostname`, startruncmd>>/opt/m - apt-mark hold walinuxagent{{GetKubernetesAgentPreprovisionYaml .}} -- echo `date`,`hostname`, preaptupdate>>/opt/m +- echo `date`,`hostname`, preaptupdate>>/opt/m - 
apt-get update -- echo `date`,`hostname`, postaptupdate>>/opt/m +- echo `date`,`hostname`, postaptupdate>>/opt/m - apt-get install -y apt-transport-https ca-certificates nfs-common -- echo `date`,`hostname`, aptinstall>>/opt/m +- echo `date`,`hostname`, aptinstall>>/opt/m - systemctl enable rpcbind - systemctl enable rpc-statd - systemctl start rpcbind @@ -193,5 +193,5 @@ runcmd: - echo `date`,`hostname`, POST-APT-SYSTEMD-DAILY>>/opt/m - apt-mark unhold walinuxagent - mkdir -p /opt/azure/containers && touch /opt/azure/containers/runcmd.complete -- echo `date`,`hostname`, endruncmd>>/opt/m +- echo `date`,`hostname`, endruncmd>>/opt/m {{end}} diff --git a/parts/k8s/kubernetesmastercustomdata.yml b/parts/k8s/kubernetesmastercustomdata.yml index fe139c44ba..a179c9ac1a 100644 --- a/parts/k8s/kubernetesmastercustomdata.yml +++ b/parts/k8s/kubernetesmastercustomdata.yml @@ -149,6 +149,7 @@ MASTER_ADDONS_CONFIG_PLACEHOLDER KUBELET_CONFIG={{GetKubeletConfigKeyVals .MasterProfile.KubernetesConfig}} KUBELET_IMAGE={{WrapAsVariable "kubernetesHyperkubeSpec"}} DOCKER_OPTS= + KUBELET_OPTS= KUBELET_NODE_LABELS={{GetMasterKubernetesLabels "',variables('labelResourceGroup'),'"}} {{if IsKubernetesVersionGe "1.6.0"}} {{if HasLinuxAgents}} diff --git a/parts/k8s/kubernetesmastercustomscript.sh b/parts/k8s/kubernetesmastercustomscript.sh index 9a3ad9e9f8..875eee77ad 100644 --- a/parts/k8s/kubernetesmastercustomscript.sh +++ b/parts/k8s/kubernetesmastercustomscript.sh @@ -20,7 +20,7 @@ # Master only secrets # APISERVER_PRIVATE_KEY CA_CERTIFICATE CA_PRIVATE_KEY MASTER_FQDN KUBECONFIG_CERTIFICATE -# KUBECONFIG_KEY ETCD_SERVER_CERTIFICATE ETCD_SERVER_PRIVATE_KEY ETCD_CLIENT_CERTIFICATE ETCD_CLIENT_PRIVATE_KEY +# KUBECONFIG_KEY ETCD_SERVER_CERTIFICATE ETCD_SERVER_PRIVATE_KEY ETCD_CLIENT_CERTIFICATE ETCD_CLIENT_PRIVATE_KEY # ETCD_PEER_CERTIFICATES ETCD_PEER_PRIVATE_KEYS ADMINUSER MASTER_INDEX # Find distro name via ID value in releases files and upcase @@ -38,59 +38,59 @@ ETCD_PEER_KEY=$(echo 
${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 # CoreOS: /usr is read-only; therefore kubectl is installed at /opt/kubectl # Details on install at kubernetetsmastercustomdataforcoreos.yml if [[ $OS == $COREOS_OS_NAME ]]; then - echo "Changing default kubectl bin location" - KUBECTL=/opt/kubectl + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl fi # cloudinit runcmd and the extension will run in parallel, this is to ensure # runcmd finishes ensureRunCommandCompleted() { - echo "waiting for runcmd to finish" - for i in {1..900}; do - if [ -e /opt/azure/containers/runcmd.complete ]; then - echo "runcmd finished" - break - fi - sleep 1 - done + echo "waiting for runcmd to finish" + for i in {1..900}; do + if [ -e /opt/azure/containers/runcmd.complete ]; then + echo "runcmd finished" + break + fi + sleep 1 + done } echo `date`,`hostname`, startscript>>/opt/m # A delay to start the kubernetes processes is necessary -# if a reboot is required. Otherwise, the agents will encounter issue: +# if a reboot is required. Otherwise, the agents will encounter issue: # https://github.com/kubernetes/kubernetes/issues/41185 if [ -f /var/run/reboot-required ]; then - REBOOTREQUIRED=true + REBOOTREQUIRED=true else - REBOOTREQUIRED=false + REBOOTREQUIRED=false fi # If APISERVER_PRIVATE_KEY is empty, then we are not on the master if [[ ! 
-z "${APISERVER_PRIVATE_KEY}" ]]; then - echo "APISERVER_PRIVATE_KEY is non-empty, assuming master node" + echo "APISERVER_PRIVATE_KEY is non-empty, assuming master node" - APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" - touch "${APISERVER_PRIVATE_KEY_PATH}" - chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" - chown root:root "${APISERVER_PRIVATE_KEY_PATH}" - echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" else - echo "APISERVER_PRIVATE_KEY is empty, assuming worker node" + echo "APISERVER_PRIVATE_KEY is empty, assuming worker node" fi # If CA_PRIVATE_KEY is empty, then we are not on the master if [[ ! -z "${CA_PRIVATE_KEY}" ]]; then - echo "CA_KEY is non-empty, assuming master node" + echo "CA_KEY is non-empty, assuming master node" - CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" - touch "${CA_PRIVATE_KEY_PATH}" - chmod 0600 "${CA_PRIVATE_KEY_PATH}" - chown root:root "${CA_PRIVATE_KEY_PATH}" - echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" else - echo "CA_PRIVATE_KEY is empty, assuming worker node" + echo "CA_PRIVATE_KEY is empty, assuming worker node" fi ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" @@ -150,29 +150,29 @@ chmod 0600 "${AZURE_JSON_PATH}" chown root:root "${AZURE_JSON_PATH}" cat << EOF > "${AZURE_JSON_PATH}" { - "cloud":"${TARGET_ENVIRONMENT}", - "tenantId": "${TENANT_ID}", - "subscriptionId": "${SUBSCRIPTION_ID}", - "aadClientId": 
"${SERVICE_PRINCIPAL_CLIENT_ID}", - "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", - "resourceGroup": "${RESOURCE_GROUP}", - "location": "${LOCATION}", - "subnetName": "${SUBNET}", - "securityGroupName": "${NETWORK_SECURITY_GROUP}", - "vnetName": "${VIRTUAL_NETWORK}", - "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", - "routeTableName": "${ROUTE_TABLE}", - "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", - "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, - "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, - "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, - "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, - "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, - "cloudProviderRatelimit": ${CLOUDPROVIDER_RATELIMIT}, - "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, - "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, - "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, - "useInstanceMetadata": ${USE_INSTANCE_METADATA} + "cloud":"${TARGET_ENVIRONMENT}", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": "${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRatelimit": 
${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "useInstanceMetadata": ${USE_INSTANCE_METADATA} } EOF @@ -184,26 +184,26 @@ set -x # wait for kubectl to report successful cluster health function ensureKubectl() { - if $REBOOTREQUIRED; then - return - fi - kubectlfound=1 - for i in {1..600}; do - if [ -e $KUBECTL ] - then - kubectlfound=0 - break - fi - sleep 1 - done - if [ $kubectlfound -ne 0 ] - then - if [ ! -e /usr/bin/docker ] - then - echo "kubectl nor docker did not install successfully" - exit 1 - fi - fi + if $REBOOTREQUIRED; then + return + fi + kubectlfound=1 + for i in {1..600}; do + if [ -e $KUBECTL ] + then + kubectlfound=0 + break + fi + sleep 1 + done + if [ $kubectlfound -ne 0 ] + then + if [ ! -e /usr/bin/docker ] + then + echo "kubectl nor docker did not install successfully" + exit 1 + fi + fi } function downloadUrl () { @@ -213,27 +213,31 @@ function downloadUrl () { } function setMaxPods () { - sed -i "s/^KUBELET_MAX_PODS=.*/KUBELET_MAX_PODS=${1}/" /etc/default/kubelet + sed -i "s/^KUBELET_MAX_PODS=.*/KUBELET_MAX_PODS=${1}/" /etc/default/kubelet } function setNetworkPlugin () { - sed -i "s/^KUBELET_NETWORK_PLUGIN=.*/KUBELET_NETWORK_PLUGIN=${1}/" /etc/default/kubelet + sed -i "s/^KUBELET_NETWORK_PLUGIN=.*/KUBELET_NETWORK_PLUGIN=${1}/" /etc/default/kubelet +} + +function setKubeletOpts () { + sed -i "s#^KUBELET_OPTS=.*#KUBELET_OPTS=${1}#" /etc/default/kubelet } function setDockerOpts () { - sed -i "s#^DOCKER_OPTS=.*#DOCKER_OPTS=${1}#" /etc/default/kubelet + sed -i "s#^DOCKER_OPTS=.*#DOCKER_OPTS=${1}#" /etc/default/kubelet } function configAzureNetworkPolicy() { - CNI_CONFIG_DIR=/etc/cni/net.d - mkdir -p $CNI_CONFIG_DIR + CNI_CONFIG_DIR=/etc/cni/net.d + mkdir -p $CNI_CONFIG_DIR - chown -R root:root $CNI_CONFIG_DIR - chmod 755 $CNI_CONFIG_DIR + chown -R root:root 
$CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR - # Download Azure VNET CNI plugins. - CNI_BIN_DIR=/opt/cni/bin - mkdir -p $CNI_BIN_DIR + # Download Azure VNET CNI plugins. + CNI_BIN_DIR=/opt/cni/bin + mkdir -p $CNI_BIN_DIR # Mirror from https://github.com/Azure/azure-container-networking/releases/tag/$AZURE_PLUGIN_VER/azure-vnet-cni-linux-amd64-$AZURE_PLUGIN_VER.tgz downloadUrl ${VNET_CNI_PLUGINS_URL} | tar -xz -C $CNI_BIN_DIR @@ -242,97 +246,284 @@ function configAzureNetworkPolicy() { chown -R root:root $CNI_BIN_DIR chmod -R 755 $CNI_BIN_DIR - # Copy config file - mv $CNI_BIN_DIR/10-azure.conf $CNI_CONFIG_DIR/ - chmod 600 $CNI_CONFIG_DIR/10-azure.conf + # Copy config file + mv $CNI_BIN_DIR/10-azure.conf $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conf - # Dump ebtables rules. - /sbin/ebtables -t nat --list + # Dump ebtables rules. + /sbin/ebtables -t nat --list - # Enable CNI. - setNetworkPlugin cni - setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" + # Enable CNI. + setNetworkPlugin cni + setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" } # Configures Kubelet to use CNI and mount the appropriate hostpaths function configCalicoNetworkPolicy() { - setNetworkPlugin cni - setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" + setNetworkPlugin cni + setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" } function configNetworkPolicy() { - if [[ "${NETWORK_POLICY}" = "azure" ]]; then - configAzureNetworkPolicy - elif [[ "${NETWORK_POLICY}" = "calico" ]]; then - configCalicoNetworkPolicy - else - # No policy, defaults to kubenet. - setNetworkPlugin kubenet - setDockerOpts "" - fi + if [[ "${NETWORK_POLICY}" = "azure" ]]; then + configAzureNetworkPolicy + elif [[ "${NETWORK_POLICY}" = "calico" ]]; then + configCalicoNetworkPolicy + else + # No policy, defaults to kubenet. 
+ setNetworkPlugin kubenet + setDockerOpts "" + fi +} + +# Install the Clear Containers runtime +function installClearContainersRuntime() { + # Add Clear Containers repository key + echo "Adding Clear Containers repository key..." + curl -sSL "https://download.opensuse.org/repositories/home:clearcontainers:clear-containers-3/xUbuntu_16.04/Release.key" | apt-key add - + + # Add Clear Container repository + echo "Adding Clear Containers repository..." + echo 'deb http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/ /' > /etc/apt/sources.list.d/cc-runtime.list + + # Install Clear Containers runtime + echo "Installing Clear Containers runtime..." + apt-get update + apt-get install --no-install-recommends -y \ + cc-runtime + + # Install thin tools for devicemapper configuration + echo "Installing thin tools to provision devicemapper..." + apt-get install --no-install-recommends -y \ + lvm2 \ + thin-provisioning-tools + + # Load systemd changes + echo "Loading changes to systemd service files..." + systemctl daemon-reload + + # Enable and start Clear Containers proxy service + echo "Enabling and starting Clear Containers proxy service..." + systemctl enable cc-proxy + systemctl start cc-proxy + + installCNI + setNetworkPlugin cni + setKubeletOpts " --container-runtime=remote --container-runtime-endpoint=/var/run/crio.sock" + setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" +} + +# Install Go from source +function installGo() { + export GO_SRC=/usr/local/go + export GOPATH="${HOME}/.go" + + # Remove any old version of Go + if [[ -d "$GO_SRC" ]]; then + rm -rf "$GO_SRC" + fi + + # Remove any old GOPATH + if [[ -d "$GOPATH" ]]; then + rm -rf "$GOPATH" + fi + + # Get the latest Go version + GO_VERSION=$(curl -sSL "https://golang.org/VERSION?m=text") + + echo "Installing Go version $GO_VERSION..." 
+ + # subshell + ( + curl -sSL "https://storage.googleapis.com/golang/${GO_VERSION}.linux-amd64.tar.gz" | sudo tar -v -C /usr/local -xz + ) + + # Set GOPATH and update PATH + echo "Setting GOPATH and updating PATH" + export PATH="${GO_SRC}/bin:${PATH}:${GOPATH}/bin" +} + +# Build and install runc +function buildRunc() { + # Clone the runc source + echo "Cloning the runc source..." + mkdir -p "${GOPATH}/src/github.com/opencontainers" + ( + cd "${GOPATH}/src/github.com/opencontainers" + git clone "https://github.com/opencontainers/runc.git" + cd runc + git reset --hard v1.0.0-rc4 + make BUILDTAGS="seccomp apparmor" + make install + ) + + echo "Successfully built and installed runc..." +} + +# Build and install CRI-O +function buildCRIO() { + installGo; + + # Add CRI-O repositories + echo "Adding repositories required for cri-o..." + add-apt-repository -y ppa:projectatomic/ppa + add-apt-repository -y ppa:alexlarsson/flatpak + apt-get update + + # Install CRI-O dependencies + echo "Installing dependencies for CRI-O..." + apt-get install --no-install-recommends -y \ + btrfs-tools \ + gcc \ + git \ + libapparmor-dev \ + libassuan-dev \ + libc6-dev \ + libdevmapper-dev \ + libglib2.0-dev \ + libgpg-error-dev \ + libgpgme11-dev \ + libostree-dev \ + libseccomp-dev \ + libselinux1-dev \ + make \ + pkg-config \ + skopeo-containers + + # Install md2man + go get github.com/cpuguy83/go-md2man + + # Fix for templates dependency + ( + go get -u github.com/docker/docker/daemon/logger/templates + cd "${GOPATH}/src/github.com/docker/docker" + mkdir -p utils + cp -r daemon/logger/templates utils/ + ) + + buildRunc; + + # Clone the CRI-O source + echo "Cloning the CRI-O source..." 
+ mkdir -p "${GOPATH}/src/github.com/kubernetes-incubator" + ( + cd "${GOPATH}/src/github.com/kubernetes-incubator" + git clone "https://github.com/kubernetes-incubator/cri-o.git" + cd cri-o + git reset --hard v1.0.0 + make BUILDTAGS="seccomp apparmor" + make install + make install.config + make install.systemd + ) + + echo "Successfully built and installed CRI-O..." + + # Cleanup the temporary directory + rm -vrf "$tmpd" + + # Cleanup the Go install + rm -vrf "$GO_SRC" "$GOPATH" + + setupCRIO; +} + +# Setup CRI-O +function setupCRIO() { + # Configure CRI-O + echo "Configuring CRI-O..." + + # Configure crio systemd service file + SYSTEMD_CRI_O_SERVICE_FILE="/usr/local/lib/systemd/system/crio.service" + sed -i 's#ExecStart=/usr/local/bin/crio#ExecStart=/usr/local/bin/crio -log-level debug#' "$SYSTEMD_CRI_O_SERVICE_FILE" + + # Configure /etc/crio/crio.conf + CRI_O_CONFIG="/etc/crio/crio.conf" + sed -i 's#storage_driver = ""#storage_driver = "devicemapper"#' "$CRI_O_CONFIG" + sed -i 's#storage_option = \[#storage_option = \["dm.directlvm_device=/dev/sdc", "dm.thinp_percent=95", "dm.thinp_metapercent=1", "dm.thinp_autoextend_threshold=80", "dm.thinp_autoextend_percent=20", "dm.directlvm_device_force=true"#' "$CRI_O_CONFIG" + sed -i 's#runtime = "/usr/bin/runc"#runtime = "/usr/local/sbin/runc"#' "$CRI_O_CONFIG" + sed -i 's#runtime_untrusted_workload = ""#runtime_untrusted_workload = "/usr/bin/cc-runtime"#' "$CRI_O_CONFIG" + sed -i 's#default_workload_trust = "trusted"#default_workload_trust = "untrusted"#' "$CRI_O_CONFIG" + + # Load systemd changes + echo "Loading changes to systemd service files..." + systemctl daemon-reload +} + +function ensureCRIO() { + if [[ "$CONTAINER_RUNTIME" == "clear-containers" ]]; then + # Make sure we can nest virtualization + if grep -q vmx /proc/cpuinfo; then + # Enable and start cri-o service + # Make sure this is done after networking plugins are installed + echo "Enabling and starting cri-o service..." 
+ systemctl enable crio crio-shutdown + systemctl start crio + fi + fi } function systemctlEnableAndCheck() { - systemctl enable $1 - systemctl is-enabled $1 - enabled=$? - for i in {1..900}; do - if [ $enabled -ne 0 ]; then - systemctl enable $1 - systemctl is-enabled $1 - enabled=$? - else - break - fi - sleep 1 - done - if [ $enabled -ne 0 ] - then - echo "$1 could not be enabled by systemctl" - exit 5 - fi - systemctl enable $1 + systemctl enable $1 + systemctl is-enabled $1 + enabled=$? + for i in {1..900}; do + if [ $enabled -ne 0 ]; then + systemctl enable $1 + systemctl is-enabled $1 + enabled=$? + else + break + fi + sleep 1 + done + if [ $enabled -ne 0 ] + then + echo "$1 could not be enabled by systemctl" + exit 5 + fi + systemctl enable $1 } function ensureDocker() { - systemctlEnableAndCheck docker - # only start if a reboot is not required - if ! $REBOOTREQUIRED; then - systemctl restart docker - dockerStarted=1 - for i in {1..900}; do - if ! /usr/bin/docker info; then - echo "status $?" - /bin/systemctl restart docker - else - echo "docker started" - dockerStarted=0 - break - fi - sleep 1 - done - if [ $dockerStarted -ne 0 ] - then - echo "docker did not start" - exit 2 - fi - fi + systemctlEnableAndCheck docker + # only start if a reboot is not required + if ! $REBOOTREQUIRED; then + systemctl restart docker + dockerStarted=1 + for i in {1..900}; do + if ! /usr/bin/docker info; then + echo "status $?" + /bin/systemctl restart docker + else + echo "docker started" + dockerStarted=0 + break + fi + sleep 1 + done + if [ $dockerStarted -ne 0 ] + then + echo "docker did not start" + exit 2 + fi + fi } function ensureKubelet() { - systemctlEnableAndCheck kubelet - # only start if a reboot is not required - if ! $REBOOTREQUIRED; then - systemctl restart kubelet - fi + systemctlEnableAndCheck kubelet + # only start if a reboot is not required + if ! 
$REBOOTREQUIRED; then + systemctl restart kubelet + fi } function extractKubectl(){ - systemctlEnableAndCheck kubectl-extract - # only start if a reboot is not required - if ! $REBOOTREQUIRED; then - systemctl restart kubectl-extract - fi + systemctlEnableAndCheck kubectl-extract + # only start if a reboot is not required + if ! $REBOOTREQUIRED; then + systemctl restart kubectl-extract + fi } function ensureJournal(){ @@ -349,36 +540,36 @@ function ensureJournal(){ } function ensureApiserver() { - if $REBOOTREQUIRED; then - return - fi - kubernetesStarted=1 - for i in {1..600}; do - if [ -e $KUBECTL ] - then - $KUBECTL cluster-info - if [ "$?" = "0" ] - then - echo "kubernetes started" - kubernetesStarted=0 - break - fi - else - /usr/bin/docker ps | grep apiserver - if [ "$?" = "0" ] - then - echo "kubernetes started" - kubernetesStarted=0 - break - fi - fi - sleep 1 - done - if [ $kubernetesStarted -ne 0 ] - then - echo "kubernetes did not start" - exit 3 - fi + if $REBOOTREQUIRED; then + return + fi + kubernetesStarted=1 + for i in {1..600}; do + if [ -e $KUBECTL ] + then + $KUBECTL cluster-info + if [ "$?" = "0" ] + then + echo "kubernetes started" + kubernetesStarted=0 + break + fi + else + /usr/bin/docker ps | grep apiserver + if [ "$?" = "0" ] + then + echo "kubernetes started" + kubernetesStarted=0 + break + fi + fi + sleep 1 + done + if [ $kubernetesStarted -ne 0 ] + then + echo "kubernetes did not start" + exit 3 + fi } function ensureEtcd() { @@ -394,114 +585,125 @@ function ensureEtcd() { } function ensureEtcdDataDir() { - mount | grep /dev/sdc1 | grep /var/lib/etcddisk - if [ "$?" = "0" ] - then - echo "Etcd is running with data dir at: /var/lib/etcddisk" - return - else - echo "/var/lib/etcddisk was not found at /dev/sdc1. Trying to mount all devices." - for i in {1..60}; do - sudo mount -a && mount | grep /dev/sdc1 | grep /var/lib/etcddisk; - if [ "$?" 
= "0" ] - then - echo "/var/lib/etcddisk mounted at: /dev/sdc1" - return - fi - sleep 5 - done - fi - - echo "Etcd data dir was not found at: /var/lib/etcddisk" - exit 4 + mount | grep /dev/sdc1 | grep /var/lib/etcddisk + if [ "$?" = "0" ] + then + echo "Etcd is running with data dir at: /var/lib/etcddisk" + return + else + echo "/var/lib/etcddisk was not found at /dev/sdc1. Trying to mount all devices." + for i in {1..60}; do + sudo mount -a && mount | grep /dev/sdc1 | grep /var/lib/etcddisk; + if [ "$?" = "0" ] + then + echo "/var/lib/etcddisk mounted at: /dev/sdc1" + return + fi + sleep 5 + done + fi + + echo "Etcd data dir was not found at: /var/lib/etcddisk" + exit 4 } function writeKubeConfig() { - KUBECONFIGDIR=/home/$ADMINUSER/.kube - KUBECONFIGFILE=$KUBECONFIGDIR/config - mkdir -p $KUBECONFIGDIR - touch $KUBECONFIGFILE - chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR - chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE - chmod 700 $KUBECONFIGDIR - chmod 600 $KUBECONFIGFILE - - # disable logging after secret output - set +x - echo " ---- -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: \"$CA_CERTIFICATE\" - server: https://$MASTER_FQDN.$LOCATION.$FQDNSuffix - name: \"$MASTER_FQDN\" -contexts: -- context: - cluster: \"$MASTER_FQDN\" - user: \"$MASTER_FQDN-admin\" - name: \"$MASTER_FQDN\" -current-context: \"$MASTER_FQDN\" -kind: Config -users: -- name: \"$MASTER_FQDN-admin\" - user: - client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" - client-key-data: \"$KUBECONFIG_KEY\" -" > $KUBECONFIGFILE - # renable logging after secrets - set -x + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + + # disable logging after secret output + set +x + echo " + --- + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
\"$CA_CERTIFICATE\" + server: https://$MASTER_FQDN.$LOCATION.$FQDNSuffix + name: \"$MASTER_FQDN\" + contexts: + - context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" + current-context: \"$MASTER_FQDN\" + kind: Config + users: + - name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" + " > $KUBECONFIGFILE + # renable logging after secrets + set -x } # master and node -echo `date`,`hostname`, EnsureDockerStart>>/opt/m +echo `date`,`hostname`, EnsureDockerStart>>/opt/m ensureDocker -echo `date`,`hostname`, configNetworkPolicyStart>>/opt/m +echo `date`,`hostname`, configNetworkPolicyStart>>/opt/m configNetworkPolicy -echo `date`,`hostname`, setMaxPodsStart>>/opt/m +if [[ "$CONTAINER_RUNTIME" == "clear-containers" ]]; then + # Ensure we can nest virtualization + if grep -q vmx /proc/cpuinfo; then + echo `date`,`hostname`, installClearContainersRuntimeStart>>/opt/m + installClearContainersRuntime + echo `date`,`hostname`, buildCRIOStart>>/opt/m + buildCRIO + fi +fi +echo `date`,`hostname`, setMaxPodsStart>>/opt/m setMaxPods ${MAX_PODS} +echo `date`,`hostname`, ensureCRIOStart>>/opt/m +ensureCRIO echo `date`,`hostname`, ensureKubeletStart>>/opt/m ensureKubelet -echo `date`,`hostname`, extractKubctlStart>>/opt/m +echo `date`,`hostname`, extractKubctlStart>>/opt/m extractKubectl -echo `date`,`hostname`, ensureJournalStart>>/opt/m +echo `date`,`hostname`, ensureJournalStart>>/opt/m ensureJournal -echo `date`,`hostname`, ensureJournalDone>>/opt/m +echo `date`,`hostname`, ensureJournalDone>>/opt/m ensureRunCommandCompleted -echo `date`,`hostname`, RunCmdCompleted>>/opt/m +echo `date`,`hostname`, RunCmdCompleted>>/opt/m if [[ $OS == $UBUNTU_OS_NAME ]]; then - # make sure walinuxagent doesn't get updated in the middle of running this script - apt-mark hold walinuxagent + # make sure walinuxagent doesn't get updated in the middle of running this script + apt-mark 
hold walinuxagent fi # master only if [[ ! -z "${APISERVER_PRIVATE_KEY}" ]]; then - writeKubeConfig - ensureKubectl - ensureEtcdDataDir - ensureEtcd - ensureApiserver + writeKubeConfig + ensureKubectl + ensureEtcdDataDir + ensureEtcd + ensureApiserver fi if [[ $OS == $UBUNTU_OS_NAME ]]; then - # mitigation for bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1676635 - echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind - sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + # mitigation for bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1676635 + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local - # If APISERVER_PRIVATE_KEY is empty, then we are not on the master - apt-mark unhold walinuxagent + # If APISERVER_PRIVATE_KEY is empty, then we are not on the master + apt-mark unhold walinuxagent fi echo "Install complete successfully" if $REBOOTREQUIRED; then - # wait 1 minute to restart node, so that the custom script extension can complete - echo 'reboot required, rebooting node in 1 minute' - /bin/bash -c "shutdown -r 1 &" + # wait 1 minute to restart node, so that the custom script extension can complete + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" fi echo `date`,`hostname`, endscript>>/opt/m -mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete \ No newline at end of file +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete From 1df56b1bbc6ac6cef971852d9d50c38a1eff003b Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 16:26:12 -0500 Subject: [PATCH 03/13] clear-containers: add example Signed-off-by: Jess Frazelle --- examples/kubernetes-clear-containers.json | 39 +++++++++++++++++++++++ 
parts/k8s/kubernetesmastercustomscript.sh | 4 +-- 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 examples/kubernetes-clear-containers.json diff --git a/examples/kubernetes-clear-containers.json b/examples/kubernetes-clear-containers.json new file mode 100644 index 0000000000..4f9a7ff364 --- /dev/null +++ b/examples/kubernetes-clear-containers.json @@ -0,0 +1,39 @@ +{ + "apiVersion": "vlabs", + "properties": { + "orchestratorProfile": { + "orchestratorType": "Kubernetes", + "kubernetesConfig": { + "networkPolicy": "azure", + "containerRuntime": "clear-containers" + } + }, + "masterProfile": { + "count": 1, + "dnsPrefix": "", + "vmSize": "Standard_D2_v2" + }, + "agentPoolProfiles": [ + { + "name": "agentpool1", + "count": 3, + "vmSize": "Standard_D4s_v3", + "availabilityProfile": "AvailabilitySet" + } + ], + "linuxProfile": { + "adminUsername": "azureuser", + "ssh": { + "publicKeys": [ + { + "keyData": "" + } + ] + } + }, + "servicePrincipalProfile": { + "clientId": "", + "secret": "" + } + } +} diff --git a/parts/k8s/kubernetesmastercustomscript.sh b/parts/k8s/kubernetesmastercustomscript.sh index 875eee77ad..ef9db686a3 100644 --- a/parts/k8s/kubernetesmastercustomscript.sh +++ b/parts/k8s/kubernetesmastercustomscript.sh @@ -307,8 +307,8 @@ function installClearContainersRuntime() { systemctl enable cc-proxy systemctl start cc-proxy - installCNI - setNetworkPlugin cni + # CRIO has only been tested with the azure plugin + configAzureNetworkPolicy setKubeletOpts " --container-runtime=remote --container-runtime-endpoint=/var/run/crio.sock" setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" } From 1b21430cc2a3f344976946b3c295bf6c31e38c1d Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 16:36:36 -0500 Subject: [PATCH 04/13] clear-containers: fix variables Signed-off-by: Jess Frazelle --- parts/k8s/kubernetesmastervars.t | 1 + 1 file changed, 1 insertion(+) diff --git a/parts/k8s/kubernetesmastervars.t 
b/parts/k8s/kubernetesmastervars.t index ef9b447d2b..947dacf380 100644 --- a/parts/k8s/kubernetesmastervars.t +++ b/parts/k8s/kubernetesmastervars.t @@ -86,6 +86,7 @@ "kubernetesKubeDNSSpec": "[parameters('kubernetesKubeDNSSpec')]", "kubernetesDNSMasqSpec": "[parameters('kubernetesDNSMasqSpec')]", "networkPolicy": "[parameters('networkPolicy')]", + "containerRuntime": "[parameters('containerRuntime')]", "cniPluginsURL":"[parameters('cniPluginsURL')]", "vnetCniLinuxPluginsURL":"[parameters('vnetCniLinuxPluginsURL')]", "vnetCniWindowsPluginsURL":"[parameters('vnetCniWindowsPluginsURL')]", From 43e95c5ce4d9ab3187dc4cbd850fc25792480764 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 16:38:19 -0500 Subject: [PATCH 05/13] clear-containers: add docs Signed-off-by: Jess Frazelle --- docs/clusterdefinition.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index a3cba2312f..cf2ed881e8 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -33,6 +33,7 @@ Here are the valid values for the orchestrator types: |kubernetesImageBase|no|This specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s-gcrio.azureedge.net/`.| |dockerEngineVersion|no|Which version of docker-engine to use in your cluster, e.g.. "17.03.*"| |networkPolicy|no|Specifies the network policy tool for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience,
`none` for not enforcing any network policy,
`calico` for Calico network policy (clusters with Linux agents only).
See [network policy examples](../examples/networkpolicy) for more information.| +|containerRuntime|no|The container runtime to use as a backend. The default is `docker`. The only other option is `clear-containers`.| |clusterSubnet|no|The IP subnet used for allocating IP addresses for pod network interfaces. The subnet must be in the VNET address space. Default value is 10.244.0.0/16.| |dnsServiceIP|no|IP address for kube-dns to listen on. If specified must be in the range of `serviceCidr`.| |dockerBridgeSubnet|no|The specific IP and subnet used for allocating IP addresses for the docker bridge network created on the kubernetes master and agents. Default value is 172.17.0.1/16. This value is used to configure the docker daemon using the [--bip flag](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0).| From 23685c6378f1c24c7187f1270aea357ad20d89db Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 17:08:56 -0500 Subject: [PATCH 06/13] clear-containers: update install script Signed-off-by: Jess Frazelle --- parts/k8s/kubernetesmastercustomscript.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/parts/k8s/kubernetesmastercustomscript.sh b/parts/k8s/kubernetesmastercustomscript.sh index ef9db686a3..05981c7b62 100644 --- a/parts/k8s/kubernetesmastercustomscript.sh +++ b/parts/k8s/kubernetesmastercustomscript.sh @@ -362,8 +362,6 @@ function buildRunc() { # Build and install CRI-O function buildCRIO() { - installGo; - # Add CRI-O repositories echo "Adding repositories required for cri-o..." 
add-apt-repository -y ppa:projectatomic/ppa @@ -390,6 +388,8 @@ function buildCRIO() { pkg-config \ skopeo-containers + installGo; + # Install md2man go get github.com/cpuguy83/go-md2man @@ -644,6 +644,14 @@ function writeKubeConfig() { set -x } +ensureRunCommandCompleted +echo `date`,`hostname`, RunCmdCompleted>>/opt/m + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + # make sure walinuxagent doesn't get updated in the middle of running this script + apt-mark hold walinuxagent +fi + # master and node echo `date`,`hostname`, EnsureDockerStart>>/opt/m ensureDocker @@ -670,14 +678,6 @@ echo `date`,`hostname`, ensureJournalStart>>/opt/m ensureJournal echo `date`,`hostname`, ensureJournalDone>>/opt/m -ensureRunCommandCompleted -echo `date`,`hostname`, RunCmdCompleted>>/opt/m - -if [[ $OS == $UBUNTU_OS_NAME ]]; then - # make sure walinuxagent doesn't get updated in the middle of running this script - apt-mark hold walinuxagent -fi - # master only if [[ ! -z "${APISERVER_PRIVATE_KEY}" ]]; then writeKubeConfig From 1d71f72ef79ae804da8e2760235fbaa8345280ad Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 17:23:54 -0500 Subject: [PATCH 07/13] clear-containers: fix script Signed-off-by: Jess Frazelle --- parts/k8s/kubernetesmastercustomscript.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/parts/k8s/kubernetesmastercustomscript.sh b/parts/k8s/kubernetesmastercustomscript.sh index 05981c7b62..a469c88c35 100644 --- a/parts/k8s/kubernetesmastercustomscript.sh +++ b/parts/k8s/kubernetesmastercustomscript.sh @@ -298,8 +298,8 @@ function installClearContainersRuntime() { lvm2 \ thin-provisioning-tools - # Load butts changes - echo "Loading changes to butts service files..." + # Load systemd changes + echo "Loading changes to systemd service files..." 
systemctl daemon-reload # Enable and start Clear Containers proxy service @@ -414,7 +414,7 @@ function buildCRIO() { make BUILDTAGS="seccomp apparmor" make install make install.config - make install.butts + make install.systemd ) echo "Successfully built and installed CRI-O..." @@ -433,9 +433,9 @@ function setupCRIO() { # Configure CRI-O echo "Configuring CRI-O..." - # Configure crio butts service file - BUTTS_CRI_O_SERVICE_FILE="/usr/local/lib/butts/system/crio.service" - sed -i 's#ExecStart=/usr/local/bin/crio#ExecStart=/usr/local/bin/crio -log-level debug#' "$BUTTS_CRI_O_SERVICE_FILE" + # Configure crio systemd service file + SYSTEMD_CRI_O_SERVICE_FILE="/usr/local/lib/systemd/system/crio.service" + sed -i 's#ExecStart=/usr/local/bin/crio#ExecStart=/usr/local/bin/crio -log-level debug#' "$SYSTEMD_CRI_O_SERVICE_FILE" # Configure /etc/crio/crio.conf CRI_O_CONFIG="/etc/crio/crio.conf" @@ -445,8 +445,8 @@ function setupCRIO() { sed -i 's#runtime_untrusted_workload = ""#runtime_untrusted_workload = "/usr/bin/cc-runtime"#' "$CRI_O_CONFIG" sed -i 's#default_workload_trust = "trusted"#default_workload_trust = "untrusted"#' "$CRI_O_CONFIG" - # Load butts changes - echo "Loading changes to butts service files..." + # Load systemd changes + echo "Loading changes to systemd service files..." 
systemctl daemon-reload } From c1c3fb5d7c8d60382e9d1bf45abb6758b5addea0 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 17:25:14 -0500 Subject: [PATCH 08/13] clear-containers: update example Signed-off-by: Jess Frazelle --- examples/kubernetes-clear-containers.json | 92 +++++++++++++---------- 1 file changed, 53 insertions(+), 39 deletions(-) diff --git a/examples/kubernetes-clear-containers.json b/examples/kubernetes-clear-containers.json index 4f9a7ff364..fb28815857 100644 --- a/examples/kubernetes-clear-containers.json +++ b/examples/kubernetes-clear-containers.json @@ -1,39 +1,53 @@ -{ - "apiVersion": "vlabs", - "properties": { - "orchestratorProfile": { - "orchestratorType": "Kubernetes", - "kubernetesConfig": { - "networkPolicy": "azure", - "containerRuntime": "clear-containers" - } - }, - "masterProfile": { - "count": 1, - "dnsPrefix": "", - "vmSize": "Standard_D2_v2" - }, - "agentPoolProfiles": [ - { - "name": "agentpool1", - "count": 3, - "vmSize": "Standard_D4s_v3", - "availabilityProfile": "AvailabilitySet" - } - ], - "linuxProfile": { - "adminUsername": "azureuser", - "ssh": { - "publicKeys": [ - { - "keyData": "" - } - ] - } - }, - "servicePrincipalProfile": { - "clientId": "", - "secret": "" - } - } -} +{ + "apiVersion": "vlabs", + "properties": { + "orchestratorProfile": { + "orchestratorType": "Kubernetes", + "orchestratorRelease": "1.8", + "kubernetesConfig": { + "networkPolicy": "azure", + "containerRuntime": "clear-containers", + "etcdVersion": "3.1.10", + "addons": [ + { + "name": "tiller", + "enabled" : false + }, + { + "name": "kubernetes-dashboard", + "enabled" : false + } + ] + } + }, + "masterProfile": { + "count": 1, + "dnsPrefix": "", + "vmSize": "Standard_D2_v2" + }, + "agentPoolProfiles": [ + { + "name": "agentpool1", + "count": 3, + "vmSize": "Standard_D4s_v3", + "availabilityProfile": "AvailabilitySet", + "storageProfile": "ManagedDisks", + "diskSizesGB": [1023] + } + ], + "linuxProfile": { + "adminUsername": 
"azureuser", + "ssh": { + "publicKeys": [ + { + "keyData": "" + } + ] + } + }, + "servicePrincipalProfile": { + "clientId": "", + "secret": "" + } + } +} From 2c34f90a5efa315da3388797b3fa50bbd4d51eb6 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 17:35:57 -0500 Subject: [PATCH 09/13] clear-containers: update features docs Signed-off-by: Jess Frazelle --- docs/kubernetes/features.md | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/docs/kubernetes/features.md b/docs/kubernetes/features.md index ea10b6dd9c..f25e098bf5 100644 --- a/docs/kubernetes/features.md +++ b/docs/kubernetes/features.md @@ -5,6 +5,7 @@ |Managed Disks|Beta|`vlabs`|[kubernetes-vmas.json](../../examples/disks-managed/kubernetes-vmss.json)|[Description](#feat-managed-disks)| |Calico Network Policy|Alpha|`vlabs`|[kubernetes-calico.json](../../examples/networkpolicy/kubernetes-calico.json)|[Description](#feat-calico)| |Custom VNET|Beta|`vlabs`|[kubernetesvnet-azure-cni.json](../../examples/vnet/kubernetesvnet-azure-cni.json)|[Description](#feat-custom-vnet)| +|Clear Containers Runtime|Alpha|`vlabs`|[kubernetes-clear-containers.json](../../examples/kubernetes-clear-containers.json)|[Description](#feat-clear-containers)| @@ -236,3 +237,37 @@ E.g.: } ] ``` + + + +## Clear Containers + +You can designate kubernetes agents to use Intel's Clear Containers as the +container runtime by setting: + +``` + "kubernetesConfig": { + "containerRuntime": "clear-containers" + } +``` + +You will need to make sure your agents are using a `vmSize` that [supports +nested +virtualization](https://azure.microsoft.com/en-us/blog/nested-virtualization-in-azure/). +These are the `Dv3` or `Ev3` series nodes. + +You will also need to attach a disk to those nodes for the device-mapper disk that clear containers will use. 
+This should look like: + +``` +"agentPoolProfiles": [ + { + "name": "agentpool1", + "count": 3, + "vmSize": "Standard_D4s_v3", + "availabilityProfile": "AvailabilitySet", + "storageProfile": "ManagedDisks", + "diskSizesGB": [1023] + } + ], +``` From a3ed54dd158c5238058a84983750db634553f388 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 18 Dec 2017 17:43:34 -0500 Subject: [PATCH 10/13] clear-containers: make test linters happy Signed-off-by: Jess Frazelle --- pkg/api/vlabs/const.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/api/vlabs/const.go b/pkg/api/vlabs/const.go index f76c80f48c..7a0ad88a44 100644 --- a/pkg/api/vlabs/const.go +++ b/pkg/api/vlabs/const.go @@ -69,8 +69,10 @@ const ( ) var ( + // NetworkPolicyValues holds the valid values for a network policy NetworkPolicyValues = [...]string{"", "none", "azure", "calico"} + // ContainerRuntimeValues holds the valid values for container runtimes ContainerRuntimeValues = [...]string{"", "docker", "clear-containers"} ) From e2f87f69ab80bbc94368e7934f0b6f3e8b4c5a49 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Tue, 16 Jan 2018 10:58:36 -0500 Subject: [PATCH 11/13] setKubeletOpts to work better with kubeconfig Signed-off-by: Jess Frazelle --- parts/k8s/artifacts/kuberneteskubelet.service | 2 +- parts/k8s/kubernetesagentcustomdata.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/parts/k8s/artifacts/kuberneteskubelet.service b/parts/k8s/artifacts/kuberneteskubelet.service index a437e56565..9c87c98d77 100644 --- a/parts/k8s/artifacts/kuberneteskubelet.service +++ b/parts/k8s/artifacts/kuberneteskubelet.service @@ -40,7 +40,7 @@ ExecStart=/usr/bin/docker run \ --v=2 ${KUBELET_FEATURE_GATES} \ --non-masquerade-cidr=${KUBELET_NON_MASQUERADE_CIDR} \ --volume-plugin-dir=/etc/kubernetes/volumeplugins \ - $KUBELET_CONFIG \ + $KUBELET_CONFIG $KUBELET_OPTS \ ${KUBELET_REGISTER_NODE} ${KUBELET_REGISTER_WITH_TAINTS} [Install] diff --git a/parts/k8s/kubernetesagentcustomdata.yml 
b/parts/k8s/kubernetesagentcustomdata.yml index 629a5b8dc8..5440aa892d 100644 --- a/parts/k8s/kubernetesagentcustomdata.yml +++ b/parts/k8s/kubernetesagentcustomdata.yml @@ -106,6 +106,7 @@ write_files: KUBELET_CONFIG={{GetKubeletConfigKeyVals .KubernetesConfig }} KUBELET_IMAGE={{WrapAsVariable "kubernetesHyperkubeSpec"}} DOCKER_OPTS= + KUBELET_OPTS= KUBELET_REGISTER_SCHEDULABLE=true KUBELET_NODE_LABELS={{GetAgentKubernetesLabels . "',variables('labelResourceGroup'),'"}} {{if IsKubernetesVersionGe "1.6.0"}} From 730196363879d194d79a004fb0866b106ecdfd28 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Tue, 16 Jan 2018 14:43:47 -0800 Subject: [PATCH 12/13] whitespace cruft --- parts/k8s/kubernetesagentcustomdata.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/parts/k8s/kubernetesagentcustomdata.yml b/parts/k8s/kubernetesagentcustomdata.yml index 5440aa892d..78a3afb2bf 100644 --- a/parts/k8s/kubernetesagentcustomdata.yml +++ b/parts/k8s/kubernetesagentcustomdata.yml @@ -91,7 +91,7 @@ write_files: {{if .IsCoreOS}} ExecStartPre=/bin/mv /tmp/kubectldir/hyperkube /opt/kubectl ExecStart=/bin/chmod a+x /opt/kubectl -{{else}} +{{else}} ExecStartPre=/bin/mv /tmp/kubectldir/hyperkube /usr/local/bin/kubectl ExecStart=/bin/chmod a+x /usr/local/bin/kubectl {{end}} @@ -159,20 +159,20 @@ coreos: command: "start" content: | # Note: Initiated as a service since there is no runcmd within CoreOS on cloud-config/Ignition - [Unit] + [Unit] Description=Start provision setup service [Service] ExecStart=/opt/azure/containers/provision-setup.sh {{else}} runcmd: -- echo `date`,`hostname`, startruncmd>>/opt/m +- echo `date`,`hostname`, startruncmd>>/opt/m - apt-mark hold walinuxagent{{GetKubernetesAgentPreprovisionYaml .}} -- echo `date`,`hostname`, preaptupdate>>/opt/m +- echo `date`,`hostname`, preaptupdate>>/opt/m - apt-get update -- echo `date`,`hostname`, postaptupdate>>/opt/m +- echo `date`,`hostname`, postaptupdate>>/opt/m - apt-get install -y 
apt-transport-https ca-certificates nfs-common -- echo `date`,`hostname`, aptinstall>>/opt/m +- echo `date`,`hostname`, aptinstall>>/opt/m - systemctl enable rpcbind - systemctl enable rpc-statd - systemctl start rpcbind @@ -194,5 +194,5 @@ runcmd: - echo `date`,`hostname`, POST-APT-SYSTEMD-DAILY>>/opt/m - apt-mark unhold walinuxagent - mkdir -p /opt/azure/containers && touch /opt/azure/containers/runcmd.complete -- echo `date`,`hostname`, endruncmd>>/opt/m -{{end}} +- echo `date`,`hostname`, endruncmd>>/opt/m +{{end}} \ No newline at end of file From e883f095a8b1c8264ecc4bcc6c00da4646afe977 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Tue, 16 Jan 2018 14:56:47 -0800 Subject: [PATCH 13/13] more whitespace fun --- parts/k8s/kubernetesmastercustomscript.sh | 502 +++++++++++----------- 1 file changed, 251 insertions(+), 251 deletions(-) diff --git a/parts/k8s/kubernetesmastercustomscript.sh b/parts/k8s/kubernetesmastercustomscript.sh index a469c88c35..d365a2d9e2 100644 --- a/parts/k8s/kubernetesmastercustomscript.sh +++ b/parts/k8s/kubernetesmastercustomscript.sh @@ -20,7 +20,7 @@ # Master only secrets # APISERVER_PRIVATE_KEY CA_CERTIFICATE CA_PRIVATE_KEY MASTER_FQDN KUBECONFIG_CERTIFICATE -# KUBECONFIG_KEY ETCD_SERVER_CERTIFICATE ETCD_SERVER_PRIVATE_KEY ETCD_CLIENT_CERTIFICATE ETCD_CLIENT_PRIVATE_KEY +# KUBECONFIG_KEY ETCD_SERVER_CERTIFICATE ETCD_SERVER_PRIVATE_KEY ETCD_CLIENT_CERTIFICATE ETCD_CLIENT_PRIVATE_KEY # ETCD_PEER_CERTIFICATES ETCD_PEER_PRIVATE_KEYS ADMINUSER MASTER_INDEX # Find distro name via ID value in releases files and upcase @@ -38,59 +38,59 @@ ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 # CoreOS: /usr is read-only; therefore kubectl is installed at /opt/kubectl # Details on install at kubernetetsmastercustomdataforcoreos.yml if [[ $OS == $COREOS_OS_NAME ]]; then - echo "Changing default kubectl bin location" - KUBECTL=/opt/kubectl + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl fi # 
cloudinit runcmd and the extension will run in parallel, this is to ensure # runcmd finishes ensureRunCommandCompleted() { - echo "waiting for runcmd to finish" - for i in {1..900}; do - if [ -e /opt/azure/containers/runcmd.complete ]; then - echo "runcmd finished" - break - fi - sleep 1 - done + echo "waiting for runcmd to finish" + for i in {1..900}; do + if [ -e /opt/azure/containers/runcmd.complete ]; then + echo "runcmd finished" + break + fi + sleep 1 + done } echo `date`,`hostname`, startscript>>/opt/m # A delay to start the kubernetes processes is necessary -# if a reboot is required. Otherwise, the agents will encounter issue: +# if a reboot is required. Otherwise, the agents will encounter issue: # https://github.com/kubernetes/kubernetes/issues/41185 if [ -f /var/run/reboot-required ]; then - REBOOTREQUIRED=true + REBOOTREQUIRED=true else - REBOOTREQUIRED=false + REBOOTREQUIRED=false fi # If APISERVER_PRIVATE_KEY is empty, then we are not on the master if [[ ! -z "${APISERVER_PRIVATE_KEY}" ]]; then - echo "APISERVER_PRIVATE_KEY is non-empty, assuming master node" + echo "APISERVER_PRIVATE_KEY is non-empty, assuming master node" - APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" - touch "${APISERVER_PRIVATE_KEY_PATH}" - chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" - chown root:root "${APISERVER_PRIVATE_KEY_PATH}" - echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" else - echo "APISERVER_PRIVATE_KEY is empty, assuming worker node" + echo "APISERVER_PRIVATE_KEY is empty, assuming worker node" fi # If CA_PRIVATE_KEY is empty, then we are not on the master if [[ ! 
-z "${CA_PRIVATE_KEY}" ]]; then - echo "CA_KEY is non-empty, assuming master node" + echo "CA_KEY is non-empty, assuming master node" - CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" - touch "${CA_PRIVATE_KEY_PATH}" - chmod 0600 "${CA_PRIVATE_KEY_PATH}" - chown root:root "${CA_PRIVATE_KEY_PATH}" - echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" else - echo "CA_PRIVATE_KEY is empty, assuming worker node" + echo "CA_PRIVATE_KEY is empty, assuming worker node" fi ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" @@ -150,29 +150,29 @@ chmod 0600 "${AZURE_JSON_PATH}" chown root:root "${AZURE_JSON_PATH}" cat << EOF > "${AZURE_JSON_PATH}" { - "cloud":"${TARGET_ENVIRONMENT}", - "tenantId": "${TENANT_ID}", - "subscriptionId": "${SUBSCRIPTION_ID}", - "aadClientId": "${SERVICE_PRINCIPAL_CLIENT_ID}", - "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", - "resourceGroup": "${RESOURCE_GROUP}", - "location": "${LOCATION}", - "subnetName": "${SUBNET}", - "securityGroupName": "${NETWORK_SECURITY_GROUP}", - "vnetName": "${VIRTUAL_NETWORK}", - "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", - "routeTableName": "${ROUTE_TABLE}", - "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", - "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, - "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, - "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, - "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, - "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, - "cloudProviderRatelimit": ${CLOUDPROVIDER_RATELIMIT}, - "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, - "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, 
- "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, - "useInstanceMetadata": ${USE_INSTANCE_METADATA} + "cloud":"${TARGET_ENVIRONMENT}", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": "${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRatelimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "useInstanceMetadata": ${USE_INSTANCE_METADATA} } EOF @@ -184,26 +184,26 @@ set -x # wait for kubectl to report successful cluster health function ensureKubectl() { - if $REBOOTREQUIRED; then - return - fi - kubectlfound=1 - for i in {1..600}; do - if [ -e $KUBECTL ] - then - kubectlfound=0 - break - fi - sleep 1 - done - if [ $kubectlfound -ne 0 ] - then - if [ ! -e /usr/bin/docker ] - then - echo "kubectl nor docker did not install successfully" - exit 1 - fi - fi + if $REBOOTREQUIRED; then + return + fi + kubectlfound=1 + for i in {1..600}; do + if [ -e $KUBECTL ] + then + kubectlfound=0 + break + fi + sleep 1 + done + if [ $kubectlfound -ne 0 ] + then + if [ ! 
-e /usr/bin/docker ] + then + echo "kubectl nor docker did not install successfully" + exit 1 + fi + fi } function downloadUrl () { @@ -213,11 +213,11 @@ function downloadUrl () { } function setMaxPods () { - sed -i "s/^KUBELET_MAX_PODS=.*/KUBELET_MAX_PODS=${1}/" /etc/default/kubelet + sed -i "s/^KUBELET_MAX_PODS=.*/KUBELET_MAX_PODS=${1}/" /etc/default/kubelet } function setNetworkPlugin () { - sed -i "s/^KUBELET_NETWORK_PLUGIN=.*/KUBELET_NETWORK_PLUGIN=${1}/" /etc/default/kubelet + sed -i "s/^KUBELET_NETWORK_PLUGIN=.*/KUBELET_NETWORK_PLUGIN=${1}/" /etc/default/kubelet } function setKubeletOpts () { @@ -225,19 +225,19 @@ function setKubeletOpts () { } function setDockerOpts () { - sed -i "s#^DOCKER_OPTS=.*#DOCKER_OPTS=${1}#" /etc/default/kubelet + sed -i "s#^DOCKER_OPTS=.*#DOCKER_OPTS=${1}#" /etc/default/kubelet } function configAzureNetworkPolicy() { - CNI_CONFIG_DIR=/etc/cni/net.d - mkdir -p $CNI_CONFIG_DIR + CNI_CONFIG_DIR=/etc/cni/net.d + mkdir -p $CNI_CONFIG_DIR - chown -R root:root $CNI_CONFIG_DIR - chmod 755 $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR - # Download Azure VNET CNI plugins. - CNI_BIN_DIR=/opt/cni/bin - mkdir -p $CNI_BIN_DIR + # Download Azure VNET CNI plugins. + CNI_BIN_DIR=/opt/cni/bin + mkdir -p $CNI_BIN_DIR # Mirror from https://github.com/Azure/azure-container-networking/releases/tag/$AZURE_PLUGIN_VER/azure-vnet-cni-linux-amd64-$AZURE_PLUGIN_VER.tgz downloadUrl ${VNET_CNI_PLUGINS_URL} | tar -xz -C $CNI_BIN_DIR @@ -246,34 +246,34 @@ function configAzureNetworkPolicy() { chown -R root:root $CNI_BIN_DIR chmod -R 755 $CNI_BIN_DIR - # Copy config file - mv $CNI_BIN_DIR/10-azure.conf $CNI_CONFIG_DIR/ - chmod 600 $CNI_CONFIG_DIR/10-azure.conf + # Copy config file + mv $CNI_BIN_DIR/10-azure.conf $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conf - # Dump ebtables rules. - /sbin/ebtables -t nat --list + # Dump ebtables rules. + /sbin/ebtables -t nat --list - # Enable CNI. 
- setNetworkPlugin cni - setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" + # Enable CNI. + setNetworkPlugin cni + setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" } # Configures Kubelet to use CNI and mount the appropriate hostpaths function configCalicoNetworkPolicy() { - setNetworkPlugin cni - setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" + setNetworkPlugin cni + setDockerOpts " --volume=/etc/cni/:/etc/cni:ro --volume=/opt/cni/:/opt/cni:ro" } function configNetworkPolicy() { - if [[ "${NETWORK_POLICY}" = "azure" ]]; then - configAzureNetworkPolicy - elif [[ "${NETWORK_POLICY}" = "calico" ]]; then - configCalicoNetworkPolicy - else - # No policy, defaults to kubenet. - setNetworkPlugin kubenet - setDockerOpts "" - fi + if [[ "${NETWORK_POLICY}" = "azure" ]]; then + configAzureNetworkPolicy + elif [[ "${NETWORK_POLICY}" = "calico" ]]; then + configCalicoNetworkPolicy + else + # No policy, defaults to kubenet. + setNetworkPlugin kubenet + setDockerOpts "" + fi } # Install the Clear Containers runtime @@ -464,66 +464,66 @@ function ensureCRIO() { } function systemctlEnableAndCheck() { - systemctl enable $1 - systemctl is-enabled $1 - enabled=$? - for i in {1..900}; do - if [ $enabled -ne 0 ]; then - systemctl enable $1 - systemctl is-enabled $1 - enabled=$? - else - break - fi - sleep 1 - done - if [ $enabled -ne 0 ] - then - echo "$1 could not be enabled by systemctl" - exit 5 - fi - systemctl enable $1 + systemctl enable $1 + systemctl is-enabled $1 + enabled=$? + for i in {1..900}; do + if [ $enabled -ne 0 ]; then + systemctl enable $1 + systemctl is-enabled $1 + enabled=$? + else + break + fi + sleep 1 + done + if [ $enabled -ne 0 ] + then + echo "$1 could not be enabled by systemctl" + exit 5 + fi + systemctl enable $1 } function ensureDocker() { - systemctlEnableAndCheck docker - # only start if a reboot is not required - if ! 
$REBOOTREQUIRED; then - systemctl restart docker - dockerStarted=1 - for i in {1..900}; do - if ! /usr/bin/docker info; then - echo "status $?" - /bin/systemctl restart docker - else - echo "docker started" - dockerStarted=0 - break - fi - sleep 1 - done - if [ $dockerStarted -ne 0 ] - then - echo "docker did not start" - exit 2 - fi - fi + systemctlEnableAndCheck docker + # only start if a reboot is not required + if ! $REBOOTREQUIRED; then + systemctl restart docker + dockerStarted=1 + for i in {1..900}; do + if ! /usr/bin/docker info; then + echo "status $?" + /bin/systemctl restart docker + else + echo "docker started" + dockerStarted=0 + break + fi + sleep 1 + done + if [ $dockerStarted -ne 0 ] + then + echo "docker did not start" + exit 2 + fi + fi } function ensureKubelet() { - systemctlEnableAndCheck kubelet - # only start if a reboot is not required - if ! $REBOOTREQUIRED; then - systemctl restart kubelet - fi + systemctlEnableAndCheck kubelet + # only start if a reboot is not required + if ! $REBOOTREQUIRED; then + systemctl restart kubelet + fi } function extractKubectl(){ - systemctlEnableAndCheck kubectl-extract - # only start if a reboot is not required - if ! $REBOOTREQUIRED; then - systemctl restart kubectl-extract - fi + systemctlEnableAndCheck kubectl-extract + # only start if a reboot is not required + if ! $REBOOTREQUIRED; then + systemctl restart kubectl-extract + fi } function ensureJournal(){ @@ -540,36 +540,36 @@ function ensureJournal(){ } function ensureApiserver() { - if $REBOOTREQUIRED; then - return - fi - kubernetesStarted=1 - for i in {1..600}; do - if [ -e $KUBECTL ] - then - $KUBECTL cluster-info - if [ "$?" = "0" ] - then - echo "kubernetes started" - kubernetesStarted=0 - break - fi - else - /usr/bin/docker ps | grep apiserver - if [ "$?" 
= "0" ] - then - echo "kubernetes started" - kubernetesStarted=0 - break - fi - fi - sleep 1 - done - if [ $kubernetesStarted -ne 0 ] - then - echo "kubernetes did not start" - exit 3 - fi + if $REBOOTREQUIRED; then + return + fi + kubernetesStarted=1 + for i in {1..600}; do + if [ -e $KUBECTL ] + then + $KUBECTL cluster-info + if [ "$?" = "0" ] + then + echo "kubernetes started" + kubernetesStarted=0 + break + fi + else + /usr/bin/docker ps | grep apiserver + if [ "$?" = "0" ] + then + echo "kubernetes started" + kubernetesStarted=0 + break + fi + fi + sleep 1 + done + if [ $kubernetesStarted -ne 0 ] + then + echo "kubernetes did not start" + exit 3 + fi } function ensureEtcd() { @@ -585,63 +585,63 @@ function ensureEtcd() { } function ensureEtcdDataDir() { - mount | grep /dev/sdc1 | grep /var/lib/etcddisk - if [ "$?" = "0" ] - then - echo "Etcd is running with data dir at: /var/lib/etcddisk" - return - else - echo "/var/lib/etcddisk was not found at /dev/sdc1. Trying to mount all devices." - for i in {1..60}; do - sudo mount -a && mount | grep /dev/sdc1 | grep /var/lib/etcddisk; - if [ "$?" = "0" ] - then - echo "/var/lib/etcddisk mounted at: /dev/sdc1" - return - fi - sleep 5 - done - fi + mount | grep /dev/sdc1 | grep /var/lib/etcddisk + if [ "$?" = "0" ] + then + echo "Etcd is running with data dir at: /var/lib/etcddisk" + return + else + echo "/var/lib/etcddisk was not found at /dev/sdc1. Trying to mount all devices." + for i in {1..60}; do + sudo mount -a && mount | grep /dev/sdc1 | grep /var/lib/etcddisk; + if [ "$?" 
= "0" ] + then + echo "/var/lib/etcddisk mounted at: /dev/sdc1" + return + fi + sleep 5 + done + fi - echo "Etcd data dir was not found at: /var/lib/etcddisk" - exit 4 + echo "Etcd data dir was not found at: /var/lib/etcddisk" + exit 4 } function writeKubeConfig() { - KUBECONFIGDIR=/home/$ADMINUSER/.kube - KUBECONFIGFILE=$KUBECONFIGDIR/config - mkdir -p $KUBECONFIGDIR - touch $KUBECONFIGFILE - chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR - chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE - chmod 700 $KUBECONFIGDIR - chmod 600 $KUBECONFIGFILE - - # disable logging after secret output - set +x - echo " - --- - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: \"$CA_CERTIFICATE\" - server: https://$MASTER_FQDN.$LOCATION.$FQDNSuffix - name: \"$MASTER_FQDN\" - contexts: - - context: - cluster: \"$MASTER_FQDN\" - user: \"$MASTER_FQDN-admin\" - name: \"$MASTER_FQDN\" - current-context: \"$MASTER_FQDN\" - kind: Config - users: - - name: \"$MASTER_FQDN-admin\" - user: - client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" - client-key-data: \"$KUBECONFIG_KEY\" - " > $KUBECONFIGFILE - # renable logging after secrets - set -x + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + + # disable logging after secret output + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: https://$MASTER_FQDN.$LOCATION.$FQDNSuffix + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + # renable logging after 
secrets + set -x } ensureRunCommandCompleted @@ -653,9 +653,9 @@ if [[ $OS == $UBUNTU_OS_NAME ]]; then fi # master and node -echo `date`,`hostname`, EnsureDockerStart>>/opt/m +echo `date`,`hostname`, EnsureDockerStart>>/opt/m ensureDocker -echo `date`,`hostname`, configNetworkPolicyStart>>/opt/m +echo `date`,`hostname`, configNetworkPolicyStart>>/opt/m configNetworkPolicy if [[ "$CONTAINER_RUNTIME" == "clear-containers" ]]; then # Ensure we can nest virtualization @@ -672,36 +672,36 @@ echo `date`,`hostname`, ensureCRIOStart>>/opt/m ensureCRIO echo `date`,`hostname`, ensureKubeletStart>>/opt/m ensureKubelet -echo `date`,`hostname`, extractKubctlStart>>/opt/m +echo `date`,`hostname`, extractKubctlStart>>/opt/m extractKubectl -echo `date`,`hostname`, ensureJournalStart>>/opt/m +echo `date`,`hostname`, ensureJournalStart>>/opt/m ensureJournal echo `date`,`hostname`, ensureJournalDone>>/opt/m # master only if [[ ! -z "${APISERVER_PRIVATE_KEY}" ]]; then - writeKubeConfig - ensureKubectl - ensureEtcdDataDir - ensureEtcd - ensureApiserver + writeKubeConfig + ensureKubectl + ensureEtcdDataDir + ensureEtcd + ensureApiserver fi if [[ $OS == $UBUNTU_OS_NAME ]]; then - # mitigation for bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1676635 - echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind - sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + # mitigation for bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1676635 + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local - # If APISERVER_PRIVATE_KEY is empty, then we are not on the master - apt-mark unhold walinuxagent + # If APISERVER_PRIVATE_KEY is empty, then we are not on the master + apt-mark unhold walinuxagent fi echo "Install complete successfully" if $REBOOTREQUIRED; 
then - # wait 1 minute to restart node, so that the custom script extension can complete - echo 'reboot required, rebooting node in 1 minute' - /bin/bash -c "shutdown -r 1 &" + # wait 1 minute to restart node, so that the custom script extension can complete + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" fi echo `date`,`hostname`, endscript>>/opt/m