From 647dae5671dafdd311ac5ea7544f69ceec09aaa3 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 20 Jun 2018 11:08:05 -0700 Subject: [PATCH 01/74] Adding tests to ensure plugins for container monitoring are running fine. (#3298) --- parts/k8s/addons/omsagent-daemonset.yaml | 2 + test/e2e/kubernetes/kubernetes_test.go | 79 ++++++++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/parts/k8s/addons/omsagent-daemonset.yaml b/parts/k8s/addons/omsagent-daemonset.yaml index 48e94e1f0a..5ba51eb52c 100644 --- a/parts/k8s/addons/omsagent-daemonset.yaml +++ b/parts/k8s/addons/omsagent-daemonset.yaml @@ -222,6 +222,8 @@ metadata: value: my_aks_cluster - name: DISABLE_KUBE_SYSTEM_LOG_COLLECTION value: "true" + - name: ISTEST + value: true securityContext: privileged: true ports: diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 9410fd4f18..068179a925 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -351,6 +351,85 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } }) + It("should have cluster-omsagent daemonset running", func() { + if hasContainerMonitoring, clusterContainerMonitoringAddon := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the correct resources have been applied") + pods, err := pod.GetAllByPrefix("omsagent-", "kube-system") + Expect(err).NotTo(HaveOccurred()) + for i, c := range clusterContainerMonitoringAddon.Containers { + err := pods[0].Spec.Containers[i].ValidateResources(c) + Expect(err).NotTo(HaveOccurred()) + } + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should have cluster-omsagent replicaset running", func() { + if hasContainerMonitoring, clusterContainerMonitoringAddon := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-rs", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the correct resources have been applied") + pods, err := pod.GetAllByPrefix("omsagent-rs", "kube-system") + Expect(err).NotTo(HaveOccurred()) + for i, c := range clusterContainerMonitoringAddon.Containers { + err := pods[0].Spec.Containers[i].ValidateResources(c) + Expect(err).NotTo(HaveOccurred()) + } + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should be successfully running kubepodinventory plugin - ContainerMonitoring", func() { + if hasContainerMonitoring, _ := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-rs", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the kubepodinventory plugin is writing data successfully") + pods, err := pod.GetAllByPrefix("omsagent-rs", "kube-system") + Expect(err).NotTo(HaveOccurred()) + _, err = pods[0].Exec("grep", "\"in_kube_podinventory::emit-stream : Success\"", "/var/opt/microsoft/omsagent/log/omsagent.log") + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should be successfully running kubenodeinventory plugin - 
ContainerMonitoring", func() { + if hasContainerMonitoring, _ := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-rs", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the kubenodeinventory plugin is writing data successfully") + pods, err := pod.GetAllByPrefix("omsagent-rs", "kube-system") + Expect(err).NotTo(HaveOccurred()) + _, err = pods[0].Exec("grep", "\"in_kube_nodeinventory::emit-stream : Success\"", "/var/opt/microsoft/omsagent/log/omsagent.log") + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should be successfully running cadvisor_perf plugin - ContainerMonitoring", func() { + if hasContainerMonitoring, _ := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the cadvisor_perf plugin is writing data successfully") + pods, err := pod.GetAllByPrefix("omsagent-", "kube-system") + Expect(err).NotTo(HaveOccurred()) + _, err = pods[0].Exec("grep", "\"in_cadvisor_perf::emit-stream : Success\"", "/var/opt/microsoft/omsagent/log/omsagent.log") + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + It("should have rescheduler running", func() { if hasRescheduler, reschedulerAddon := eng.HasAddon("rescheduler"); hasRescheduler { running, err := pod.WaitOnReady("rescheduler", "kube-system", 3, 30*time.Second, cfg.Timeout) From 0579e605b81b7119326adafc329ceb4aef7f4ae9 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Wed, 20 Jun 2018 11:09:31 -0700 Subject: [PATCH 02/74] More unit tests (#3300) --- pkg/acsengine/ssh_test.go | 82 ++++----------------- pkg/helpers/helpers_test.go | 138 ++++++++++++++++++++++++++++++++++-- 2 files changed, 143 insertions(+), 77 deletions(-) diff --git a/pkg/acsengine/ssh_test.go b/pkg/acsengine/ssh_test.go index fe4f82f96a..cb963336c5 100644 --- a/pkg/acsengine/ssh_test.go +++ b/pkg/acsengine/ssh_test.go @@ -1,87 +1,29 @@ package acsengine import ( - "math/rand" + "os" "testing" - "github.com/Azure/acs-engine/pkg/helpers" - "github.com/Azure/acs-engine/pkg/i18n" ) -func TestCreateSSH(t *testing.T) { - rg := rand.New(rand.NewSource(42)) - - expectedPublicKeyString := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCyx5MHXjJvJAx5DJ9FZNIDa/QTWorSF+Ra21Tz49DQWfdSESnCGFFVBh/MQUFGv5kCenbmqEjsWF177kFOdv1vOTz4sKRlHg7u3I9uCyyZQrWx4X4RdNk7eX+isQVjFXYw2W1rRDUrnK/82qVTv1f0gu1DV4Z7GoIa2jfJ0zBUY3IW0VN9jYaPVuwv4t5y2GwSZF+HBRuOfLfiUgt4+qVFOz4KwRaEBsVfWxlidlT3K3/+ztWpFOmaKIOjQreEWV10ZSo3f9g6j/HdMPtwYvRCtYStbFCRmcbPr9nuR84SAX/4f95KvBAKLnXwb5Bt71D2vAlZSW1Ylv2VbcaZ73+43EpyphYCSg3kOCdwsqE/EU+Swued82SguLALD3mNKbxHGJppFjz3GMyPpJuSH5EE1OANyPxABCwCYycKiNWbOPi3l6o4tMrASYRXi8l3l9JCvioUJ3bXXH6cDpcP4P6QgsuxhwVkUiECU+dbjJXK4gAUVuWKkMOdY7ITh82oU3wOWXbk8K3bdIUp2ylcHeAd2pekGMuaEKGbrXGRiBitCEjl67Bj5opQflgSmI63g8Sa3mKOPGRYMI5MXHMVj4Rns5JFHoENuImrlvrbLv3izAwO61vgN7iK26BwzO7jz92fNOHGviejNWYJyi4vZlq07153NZXP8D2xYTebh9hwHQ==\n" - - expectedPrivateKeyString := `-----BEGIN RSA PRIVATE KEY----- -MIIJKgIBAAKCAgEAsseTB14ybyQMeQyfRWTSA2v0E1qK0hfkWttU8+PQ0Fn3UhEp -whhRVQYfzEFBRr+ZAnp25qhI7Fhde+5BTnb9bzk8+LCkZR4O7tyPbgssmUK1seF+ -EXTZO3l/orEFYxV2MNlta0Q1K5yv/NqlU79X9ILtQ1eGexqCGto3ydMwVGNyFtFT 
-fY2Gj1bsL+LecthsEmRfhwUbjny34lILePqlRTs+CsEWhAbFX1sZYnZU9yt//s7V -qRTpmiiDo0K3hFlddGUqN3/YOo/x3TD7cGL0QrWErWxQkZnGz6/Z7kfOEgF/+H/e -SrwQCi518G+Qbe9Q9rwJWUltWJb9lW3Gme9/uNxKcqYWAkoN5DgncLKhPxFPksLn -nfNkoLiwCw95jSm8RxiaaRY89xjMj6Sbkh+RBNTgDcj8QAQsAmMnCojVmzj4t5eq -OLTKwEmEV4vJd5fSQr4qFCd211x+nA6XD+D+kILLsYcFZFIhAlPnW4yVyuIAFFbl -ipDDnWOyE4fNqFN8Dll25PCt23SFKdspXB3gHdqXpBjLmhChm61xkYgYrQhI5euw -Y+aKUH5YEpiOt4PEmt5ijjxkWDCOTFxzFY+EZ7OSRR6BDbiJq5b62y794swMDutb -4De4itugcMzu48/dnzThxr4nozVmCcouL2ZatO9edzWVz/A9sWE3m4fYcB0CAwEA -AQKCAgEArQmNvWvm1LvHdsJIxhm3S6iJLNJN2ttVIrt3ljfCPGdXgg8qo7p1vh2X -WVMvoxJ/Pm7Z9pabPmao1PLeMtvooGZ+JRaTh2t4eKjyCki2egCfa/Qc2TiHqZEH -gKhl1mlHZDCOP2xdKkEV9V6K9mwU7YxrqOpmN3CIzQS5SpcmCAfYvU0Nyk/ZFZPE -NvUW6YGf2I1eCIlhCqCcOmm+wPGYVVHp0u7gpBkJoCnEgBCYXEO2NyJqmqSrFZJx -FuvURD1avvXLzrvmxYfdSYHHXBfq40ZdjJ1xvftg+lPyUzcctUDOY+8fcKZlv/UI -IhdZa45ehvGo+sqfE0fRWXhO6V9t9hdHwOq6ZEF2TtaA9qwPpZxiN5BN7G6Vi6Bm -u3HhSCHyEIdySi9/hX3fhDrhPN08NULLhpiKuSiFQesmUxFxWAprMpEyCdx0wva7 -5tZTQQfmVHCoWyVXWNMGTGBA/h8SWquoQWWhpG7UWCt0A0e0kcbegZTQPddxgITe -uqf6GadbajAr6Qwicf5yNH7bVPiD8dGWU07W3t4C0JyLGNLN34aT0OpleSck4dGp -V2UYylQNkf/EmxTY/CCPtNVVKng3CJ+jZvS4MOKvTi+vvsccd8x6BEo9xKetJhAA -SQeNDMu9tEPlZNHC972YNLb+LPm+feqgM2W/qcONtNhPw1INW+ECggEBAOmPO9jz -q6Gm8nNoALteuAD58pJ/suJTfhXbkGBOCG+hazlmk3rGzf9G/cK2jbS3ePoHw7b9 -oJcpoF2L1nUCdwxTJMUS+iyfVRQ4L8lRDC95x3vdBcdgFZUQgEx1L6hKuK5BpZOY -fyvIEmwpW7OpCOEqXeMOq3agR4//uptIyNCzyIPJz43H0dh6m4l+fYy53AOvDAeW -Xk0wERP6bolngkVnz1XbE43UNZqTFkGMF4gjJCbZ+UguOltsZXSPLA+ruRy3oYGn -LVo1ntAf8Ih94F43Y8Doe+VX3y2UJUqQa/ZFG2nu6KeuDWhwRS/XZQSkxrJ0bO2w -6eOCOEqggO7Qz7sCggEBAMP08Q1nPfmwdawEYWqopKeAMh00oMoX14u8UDmYejiH -uBegwzqgmOLfajFMJDnNXTyzxIRIndzrvXzvtFpSHkh29sOXXG9xlGyLWZGcxtzW -ivyTMw/pTg3yjN0qsleRB/o89VOYP2OG+1XjEcie6LNxXUN/wG5gUx8Wumb2c1hW -XBDM6cRbiSuJuINjscUgiHXKQddfu1cVRaNUgP1PGniKydCqdI2rUUQhziTmmj+o -q+dSv6nGRaK3uNhJrhpMlljxy1Mcr9zLP5FM1GjaF+VQ3zHNxDDbXl13rQPpDocw -vu9tAS/J1+vTgKzcHjKnudUWmoNahT3f4/86fc6XJgcCggEBAMK4ry3Goa5JUNPU -vt94LbJqsMlg+9PjxjgU8T7JcBEZpBqcIZL4EqClIEXpCyXC3XKfbJWwyOWeR9wW -DPtKzdQRsZM4qijvwe/0lCqkjqM6RY1IDVxXCEdaFY0pGk2V1nk5tADk4AmxaWKR -7KlR4VxQhSwbe+qP4Hn2vC5gtUQCz8bIR2muUY7JUcmFEslz3zGXDFF7FS4HSAW/ -Ac8+5AZXcS3kU14osXQo8yI82RWgLrDRhBqgp/i227Mc9qAuDEwb8OP2bEJMeBaO -umwhfiEuztTzPvBLnX8Thy+uTsRog12DWKcL3pPXHmevjcIcWqhHltVobOdIFwRo -4nW406cCggEBALmwZ6hy2Ai/DZL3B7VBn93WHicM0v0OwMN6rG8XrWHaQjmprrbk -rlv2qDOU2pMnpx25oBRWl7lcbtBweXBJdsbmbIoF6aL1d1ewaS0R6mQkrcoQVwfR -5pRS7uc56YwPNAcOMs+HazIOHCdUKGr7IrnASEeJTLmLb9j6+aJOEhl4pH+LHk5j -C0YFmKJxG2kYnhc4lVHZNrabwsS2dBEWH5hwtDOXAyGoYTb17dmL6ElAtb1b7aGc -8Cn0fSYAFAp53tLkNe9JNOE+fLtcmb/OQ2ybSRVxzmMZzX82w+37sDetmpFZsxEs -7P5dCwdDAx6vT+q8I6krYy2x9uTJ8aOOGYsCggEAAW9qf3UNuY0IB9kmHF3Oo1gN -s82h0OLpjJkW+5YYC0vYQit4AYNjXw+T+Z6WKOHOG3LIuQVC6Qj4c1+oN6sJi7re -Ey6Zq7/uWmYUpi9C8CbX1clJwany0V2PjGKL94gCIl7vaXS/4ouzzfl8qbF7FjQ4 -Qq/HPWSIC9Z8rKtUDDHeZYaLqvdhqbas/drqCXmeLeYM6Om4lQJdP+zip3Ctulp1 -EPDesL0rH+3s1CKpgkhYdbJ675GFoGoq+X21QaqsdvoXmmuJF9qq9Tq+JaWloUNq -2FWXLhSX02saIdbIheS1fv/LqekXZd8eFXUj7VZ15tPG3SJqORS0pMtxSAJvLw== ------END RSA PRIVATE KEY----- -` - +func TestCreateSaveSSH(t *testing.T) { translator := &i18n.Translator{ Locale: nil, } + username := "test_user" + outputDirectory := "unit_tests" + expectedFile := outputDirectory + "/" + username + "_rsa" - privateKey, publicKey, err := helpers.CreateSSH(rg, translator) - if err != nil { - t.Fatalf("failed to generate SSH: %s", err) - } - privateKeyString := string(privateKeyToPem(privateKey)) + defer os.Remove(expectedFile) - if privateKeyString != expectedPrivateKeyString { 
- t.Fatalf("Private Key did not match expected format/value") + _, _, err := CreateSaveSSH(username, outputDirectory, translator) + + if err != nil { + t.Fatalf("Unexpected error creating and saving ssh key: %s", err) } - if publicKey != expectedPublicKeyString { - t.Fatalf("Public Key did not match expected format/value") + if _, err := os.Stat(expectedFile); os.IsNotExist(err) { + t.Fatalf("ssh file was not created") } } diff --git a/pkg/helpers/helpers_test.go b/pkg/helpers/helpers_test.go index 4764212ac4..00e7ecaf9a 100644 --- a/pkg/helpers/helpers_test.go +++ b/pkg/helpers/helpers_test.go @@ -1,16 +1,36 @@ package helpers -import "testing" +import ( + "bytes" + "crypto/x509" + "encoding/pem" + "math/rand" + "testing" -func TestPointerToBool(t *testing.T) { - boolVar := true - ret := PointerToBool(boolVar) - if *ret != boolVar { - t.Fatalf("expected PointerToBool(true) to return *true, instead returned %#v", ret) + "github.com/Azure/acs-engine/pkg/i18n" +) + +type ContainerService struct { + ID string `json:"id"` + Location string `json:"location"` + Name string `json:"name"` +} + +func TestJSONMarshal(t *testing.T) { + input := &ContainerService{} + result, _ := JSONMarshal(input, false) + expected := "{\"id\":\"\",\"location\":\"\",\"name\":\"\"}\n" + if string(result) != expected { + t.Fatalf("JSONMarshal returned unexpected result: expected %s but got %s", expected, string(result)) + } + result, _ = JSONMarshalIndent(input, "", "", false) + expected = "{\n\"id\": \"\",\n\"location\": \"\",\n\"name\": \"\"\n}\n" + if string(result) != expected { + t.Fatalf("JSONMarshal returned unexpected result: expected \n%sbut got \n%s", expected, result) } } -func TestIsRegionNormalized(t *testing.T) { +func TestNormalizaAzureRegion(t *testing.T) { cases := []struct { input string expectedResult string @@ -40,3 +60,107 @@ func TestIsRegionNormalized(t *testing.T) { } } } + +func TestPointerToBool(t *testing.T) { + boolVar := true + ret := PointerToBool(boolVar) + if *ret != boolVar { + t.Fatalf("expected PointerToBool(true) to return *true, instead returned %#v", ret) + } + + if IsTrueBoolPointer(ret) != boolVar { + t.Fatalf("expected IsTrueBoolPointer(*true) to return true, instead returned %#v", IsTrueBoolPointer(ret)) + } + + boolVar = false + ret = PointerToBool(boolVar) + if *ret != boolVar { + t.Fatalf("expected PointerToBool(false) to return *false, instead returned %#v", ret) + } + + if IsTrueBoolPointer(ret) != boolVar { + t.Fatalf("expected IsTrueBoolPointer(*false) to return false, instead returned %#v", IsTrueBoolPointer(ret)) + } +} + +func TestCreateSSH(t *testing.T) { + rg := rand.New(rand.NewSource(42)) + + expectedPublicKeyString := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCyx5MHXjJvJAx5DJ9FZNIDa/QTWorSF+Ra21Tz49DQWfdSESnCGFFVBh/MQUFGv5kCenbmqEjsWF177kFOdv1vOTz4sKRlHg7u3I9uCyyZQrWx4X4RdNk7eX+isQVjFXYw2W1rRDUrnK/82qVTv1f0gu1DV4Z7GoIa2jfJ0zBUY3IW0VN9jYaPVuwv4t5y2GwSZF+HBRuOfLfiUgt4+qVFOz4KwRaEBsVfWxlidlT3K3/+ztWpFOmaKIOjQreEWV10ZSo3f9g6j/HdMPtwYvRCtYStbFCRmcbPr9nuR84SAX/4f95KvBAKLnXwb5Bt71D2vAlZSW1Ylv2VbcaZ73+43EpyphYCSg3kOCdwsqE/EU+Swued82SguLALD3mNKbxHGJppFjz3GMyPpJuSH5EE1OANyPxABCwCYycKiNWbOPi3l6o4tMrASYRXi8l3l9JCvioUJ3bXXH6cDpcP4P6QgsuxhwVkUiECU+dbjJXK4gAUVuWKkMOdY7ITh82oU3wOWXbk8K3bdIUp2ylcHeAd2pekGMuaEKGbrXGRiBitCEjl67Bj5opQflgSmI63g8Sa3mKOPGRYMI5MXHMVj4Rns5JFHoENuImrlvrbLv3izAwO61vgN7iK26BwzO7jz92fNOHGviejNWYJyi4vZlq07153NZXP8D2xYTebh9hwHQ==\n" + + expectedPrivateKeyString := `-----BEGIN RSA PRIVATE KEY----- 
+MIIJKgIBAAKCAgEAsseTB14ybyQMeQyfRWTSA2v0E1qK0hfkWttU8+PQ0Fn3UhEp +whhRVQYfzEFBRr+ZAnp25qhI7Fhde+5BTnb9bzk8+LCkZR4O7tyPbgssmUK1seF+ +EXTZO3l/orEFYxV2MNlta0Q1K5yv/NqlU79X9ILtQ1eGexqCGto3ydMwVGNyFtFT +fY2Gj1bsL+LecthsEmRfhwUbjny34lILePqlRTs+CsEWhAbFX1sZYnZU9yt//s7V +qRTpmiiDo0K3hFlddGUqN3/YOo/x3TD7cGL0QrWErWxQkZnGz6/Z7kfOEgF/+H/e +SrwQCi518G+Qbe9Q9rwJWUltWJb9lW3Gme9/uNxKcqYWAkoN5DgncLKhPxFPksLn +nfNkoLiwCw95jSm8RxiaaRY89xjMj6Sbkh+RBNTgDcj8QAQsAmMnCojVmzj4t5eq +OLTKwEmEV4vJd5fSQr4qFCd211x+nA6XD+D+kILLsYcFZFIhAlPnW4yVyuIAFFbl +ipDDnWOyE4fNqFN8Dll25PCt23SFKdspXB3gHdqXpBjLmhChm61xkYgYrQhI5euw +Y+aKUH5YEpiOt4PEmt5ijjxkWDCOTFxzFY+EZ7OSRR6BDbiJq5b62y794swMDutb +4De4itugcMzu48/dnzThxr4nozVmCcouL2ZatO9edzWVz/A9sWE3m4fYcB0CAwEA +AQKCAgEArQmNvWvm1LvHdsJIxhm3S6iJLNJN2ttVIrt3ljfCPGdXgg8qo7p1vh2X +WVMvoxJ/Pm7Z9pabPmao1PLeMtvooGZ+JRaTh2t4eKjyCki2egCfa/Qc2TiHqZEH +gKhl1mlHZDCOP2xdKkEV9V6K9mwU7YxrqOpmN3CIzQS5SpcmCAfYvU0Nyk/ZFZPE +NvUW6YGf2I1eCIlhCqCcOmm+wPGYVVHp0u7gpBkJoCnEgBCYXEO2NyJqmqSrFZJx +FuvURD1avvXLzrvmxYfdSYHHXBfq40ZdjJ1xvftg+lPyUzcctUDOY+8fcKZlv/UI +IhdZa45ehvGo+sqfE0fRWXhO6V9t9hdHwOq6ZEF2TtaA9qwPpZxiN5BN7G6Vi6Bm +u3HhSCHyEIdySi9/hX3fhDrhPN08NULLhpiKuSiFQesmUxFxWAprMpEyCdx0wva7 +5tZTQQfmVHCoWyVXWNMGTGBA/h8SWquoQWWhpG7UWCt0A0e0kcbegZTQPddxgITe +uqf6GadbajAr6Qwicf5yNH7bVPiD8dGWU07W3t4C0JyLGNLN34aT0OpleSck4dGp +V2UYylQNkf/EmxTY/CCPtNVVKng3CJ+jZvS4MOKvTi+vvsccd8x6BEo9xKetJhAA +SQeNDMu9tEPlZNHC972YNLb+LPm+feqgM2W/qcONtNhPw1INW+ECggEBAOmPO9jz +q6Gm8nNoALteuAD58pJ/suJTfhXbkGBOCG+hazlmk3rGzf9G/cK2jbS3ePoHw7b9 +oJcpoF2L1nUCdwxTJMUS+iyfVRQ4L8lRDC95x3vdBcdgFZUQgEx1L6hKuK5BpZOY +fyvIEmwpW7OpCOEqXeMOq3agR4//uptIyNCzyIPJz43H0dh6m4l+fYy53AOvDAeW +Xk0wERP6bolngkVnz1XbE43UNZqTFkGMF4gjJCbZ+UguOltsZXSPLA+ruRy3oYGn +LVo1ntAf8Ih94F43Y8Doe+VX3y2UJUqQa/ZFG2nu6KeuDWhwRS/XZQSkxrJ0bO2w +6eOCOEqggO7Qz7sCggEBAMP08Q1nPfmwdawEYWqopKeAMh00oMoX14u8UDmYejiH +uBegwzqgmOLfajFMJDnNXTyzxIRIndzrvXzvtFpSHkh29sOXXG9xlGyLWZGcxtzW +ivyTMw/pTg3yjN0qsleRB/o89VOYP2OG+1XjEcie6LNxXUN/wG5gUx8Wumb2c1hW +XBDM6cRbiSuJuINjscUgiHXKQddfu1cVRaNUgP1PGniKydCqdI2rUUQhziTmmj+o +q+dSv6nGRaK3uNhJrhpMlljxy1Mcr9zLP5FM1GjaF+VQ3zHNxDDbXl13rQPpDocw +vu9tAS/J1+vTgKzcHjKnudUWmoNahT3f4/86fc6XJgcCggEBAMK4ry3Goa5JUNPU +vt94LbJqsMlg+9PjxjgU8T7JcBEZpBqcIZL4EqClIEXpCyXC3XKfbJWwyOWeR9wW +DPtKzdQRsZM4qijvwe/0lCqkjqM6RY1IDVxXCEdaFY0pGk2V1nk5tADk4AmxaWKR +7KlR4VxQhSwbe+qP4Hn2vC5gtUQCz8bIR2muUY7JUcmFEslz3zGXDFF7FS4HSAW/ +Ac8+5AZXcS3kU14osXQo8yI82RWgLrDRhBqgp/i227Mc9qAuDEwb8OP2bEJMeBaO +umwhfiEuztTzPvBLnX8Thy+uTsRog12DWKcL3pPXHmevjcIcWqhHltVobOdIFwRo +4nW406cCggEBALmwZ6hy2Ai/DZL3B7VBn93WHicM0v0OwMN6rG8XrWHaQjmprrbk +rlv2qDOU2pMnpx25oBRWl7lcbtBweXBJdsbmbIoF6aL1d1ewaS0R6mQkrcoQVwfR +5pRS7uc56YwPNAcOMs+HazIOHCdUKGr7IrnASEeJTLmLb9j6+aJOEhl4pH+LHk5j +C0YFmKJxG2kYnhc4lVHZNrabwsS2dBEWH5hwtDOXAyGoYTb17dmL6ElAtb1b7aGc +8Cn0fSYAFAp53tLkNe9JNOE+fLtcmb/OQ2ybSRVxzmMZzX82w+37sDetmpFZsxEs +7P5dCwdDAx6vT+q8I6krYy2x9uTJ8aOOGYsCggEAAW9qf3UNuY0IB9kmHF3Oo1gN +s82h0OLpjJkW+5YYC0vYQit4AYNjXw+T+Z6WKOHOG3LIuQVC6Qj4c1+oN6sJi7re +Ey6Zq7/uWmYUpi9C8CbX1clJwany0V2PjGKL94gCIl7vaXS/4ouzzfl8qbF7FjQ4 +Qq/HPWSIC9Z8rKtUDDHeZYaLqvdhqbas/drqCXmeLeYM6Om4lQJdP+zip3Ctulp1 +EPDesL0rH+3s1CKpgkhYdbJ675GFoGoq+X21QaqsdvoXmmuJF9qq9Tq+JaWloUNq +2FWXLhSX02saIdbIheS1fv/LqekXZd8eFXUj7VZ15tPG3SJqORS0pMtxSAJvLw== +-----END RSA PRIVATE KEY----- +` + + translator := &i18n.Translator{ + Locale: nil, + } + + privateKey, publicKey, err := CreateSSH(rg, translator) + if err != nil { + t.Fatalf("failed to generate SSH: %s", err) + } + pemBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), 
+ } + pemBuffer := bytes.Buffer{} + pem.Encode(&pemBuffer, pemBlock) + + if string(pemBuffer.Bytes()) != expectedPrivateKeyString { + t.Fatalf("Private Key did not match expected format/value") + } + + if publicKey != expectedPublicKeyString { + t.Fatalf("Public Key did not match expected format/value") + } +} From fb20419f2a3d815fd11bc20aec70a7f160aa8014 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Wed, 20 Jun 2018 11:32:23 -0700 Subject: [PATCH 03/74] timeout nc after 3 secs, check 20 times (#3314) --- parts/k8s/kubernetescustomscript.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index 442a1f7c14..8caa7499f2 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -55,7 +55,7 @@ else fi function testOutboundConnection() { - retrycmd_if_failure 120 1 20 nc -v 8.8.8.8 53 || retrycmd_if_failure 120 1 20 nc -v 8.8.4.4 53 || exit $ERR_OUTBOUND_CONN_FAIL + retrycmd_if_failure 20 1 3 nc -v 8.8.8.8 53 || retrycmd_if_failure 20 1 3 nc -v 8.8.4.4 53 || exit $ERR_OUTBOUND_CONN_FAIL } function waitForCloudInit() { From d4076e58e39de96998b6c132489e0922e35fc264 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Wed, 20 Jun 2018 11:54:35 -0700 Subject: [PATCH 04/74] fix missing exit (#3315) --- parts/k8s/kubernetescustomscript.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index 8caa7499f2..295b42a0dc 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -369,7 +369,7 @@ function extractHyperkube(){ TMP_DIR=$(mktemp -d) retrycmd_if_failure 100 1 30 curl -sSL -o /usr/local/bin/img "https://github.com/genuinetools/img/releases/download/v0.4.6/img-linux-amd64" chmod +x /usr/local/bin/img - retrycmd_if_failure 100 1 60 img pull $HYPERKUBE_URL || $ERR_K8S_DOWNLOAD_TIMEOUT + retrycmd_if_failure 100 1 60 img pull $HYPERKUBE_URL || exit $ERR_K8S_DOWNLOAD_TIMEOUT path=$(find /tmp/img -name "hyperkube") if [[ $OS == $COREOS_OS_NAME ]]; then From 5e08e6aa24829a06180b92e9d97ee844dd1b9aa0 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Wed, 20 Jun 2018 13:57:52 -0700 Subject: [PATCH 05/74] change codecov yaml (#3316) --- .codecov/codecov.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.codecov/codecov.yml b/.codecov/codecov.yml index 3119a9c399..b61a827687 100644 --- a/.codecov/codecov.yml +++ b/.codecov/codecov.yml @@ -1,6 +1,6 @@ codecov: notify: - require_ci_to_pass: yes + require_ci_to_pass: no coverage: precision: 2 From 0925fb8eb791cb9d273ffdcfadb406357a7aa2b7 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Wed, 20 Jun 2018 14:08:53 -0700 Subject: [PATCH 06/74] get img from acs-mirror (#3318) --- parts/k8s/kubernetescustomscript.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index 295b42a0dc..efd1d7b0fe 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -367,7 +367,7 @@ function ensureKubelet() { function extractHyperkube(){ TMP_DIR=$(mktemp -d) - retrycmd_if_failure 100 1 30 curl -sSL -o /usr/local/bin/img "https://github.com/genuinetools/img/releases/download/v0.4.6/img-linux-amd64" + retrycmd_if_failure 100 1 30 curl -sSL -o /usr/local/bin/img "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.4.6" chmod +x /usr/local/bin/img 
retrycmd_if_failure 100 1 60 img pull $HYPERKUBE_URL || exit $ERR_K8S_DOWNLOAD_TIMEOUT path=$(find /tmp/img -name "hyperkube") From ba7f02faa95019749cde14d185e01e9ffeff3030 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Wed, 20 Jun 2018 17:12:57 -0700 Subject: [PATCH 07/74] Fix ssh to get logs in e2e tests (#3322) --- test/e2e/runner/cli_provisioner.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go index c6c01cb244..bd8c844761 100644 --- a/test/e2e/runner/cli_provisioner.go +++ b/test/e2e/runner/cli_provisioner.go @@ -250,6 +250,13 @@ func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Con masterFiles := agentFiles masterFiles = append(masterFiles, "/opt/azure/containers/mountetcd.sh", "/opt/azure/containers/setup-etcd.sh", "/opt/azure/containers/setup-etcd.log") hostname := fmt.Sprintf("%s.%s.cloudapp.azure.com", cli.Config.Name, cli.Config.Location) + cmd := exec.Command("ssh-agent", "-s") + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Error while trying to start ssh agent:%s\nOutput:%s", err, out) + } + authSock := strings.Split(strings.Split(string(out), "=")[1], ";") + os.Setenv("SSH_AUTH_SOCK", authSock[0]) conn, err := remote.NewConnection(hostname, "22", cli.Engine.ClusterDefinition.Properties.LinuxProfile.AdminUsername, cli.Config.GetSSHKeyPath()) if err != nil { return err @@ -273,9 +280,9 @@ func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Con } connectString := fmt.Sprintf("%s@%s:/tmp/k8s-*", conn.User, hostname) logsPath := filepath.Join(cfg.CurrentWorkingDir, "_logs", hostname) - cmd := exec.Command("scp", "-i", conn.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, logsPath) + cmd = exec.Command("scp", "-i", conn.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, logsPath) util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err = cmd.CombinedOutput() if err != nil { log.Printf("Error output:%s\n", out) return err From 84ee71189d5542ee9f01fa82c7de3117a8536aef Mon Sep 17 00:00:00 2001 From: Michalis Kargakis Date: Thu, 21 Jun 2018 11:13:18 +0200 Subject: [PATCH 08/74] Move openshift infra resources into separate template file (#3310) --- parts/k8s/kubernetesbase.t | 3 + parts/k8s/kubernetesmasterresources.t | 159 -------------------------- parts/openshift/infraresources.t | 157 +++++++++++++++++++++++++ pkg/acsengine/const.go | 1 + pkg/acsengine/engine.go | 1 + 5 files changed, 162 insertions(+), 159 deletions(-) create mode 100644 parts/openshift/infraresources.t diff --git a/parts/k8s/kubernetesbase.t b/parts/k8s/kubernetesbase.t index e387be32c6..d9efe71ab2 100644 --- a/parts/k8s/kubernetesbase.t +++ b/parts/k8s/kubernetesbase.t @@ -51,6 +51,9 @@ {{template "k8s/kubernetesmastervars.t" .}} }, "resources": [ + {{if IsOpenShift}} + {{template "openshift/infraresources.t" .}} + {{end}} {{ range $index, $element := .AgentPoolProfiles}} {{if $index}}, {{end}} {{if .IsWindows}} diff --git a/parts/k8s/kubernetesmasterresources.t b/parts/k8s/kubernetesmasterresources.t index 8588639701..399bb4826a 100644 --- a/parts/k8s/kubernetesmasterresources.t +++ b/parts/k8s/kubernetesmasterresources.t @@ -1,162 +1,3 @@ -{{if IsOpenShift}} - { - "type": "Microsoft.Network/networkSecurityGroups", - "apiVersion": "[variables('apiVersionDefault')]", - "location": "[variables('location')]", - "name": 
"[variables('routerNSGName')]", - "properties": { - "securityRules": [ - { - "name": "allow_http", - "properties": { - "access": "Allow", - "description": "Allow http traffic to infra nodes", - "destinationAddressPrefix": "*", - "destinationPortRange": "80", - "direction": "Inbound", - "priority": 110, - "protocol": "Tcp", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } - }, - { - "name": "allow_https", - "properties": { - "access": "Allow", - "description": "Allow https traffic to infra nodes", - "destinationAddressPrefix": "*", - "destinationPortRange": "443", - "direction": "Inbound", - "priority": 111, - "protocol": "Tcp", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } - } - ] - } - }, - { - "name": "[variables('routerIPName')]", - "type": "Microsoft.Network/publicIPAddresses", - "apiVersion": "2017-08-01", - "location": "[variables('location')]", - "properties": { - "publicIPAllocationMethod": "Static", - "dnsSettings": { - "domainNameLabel": "[concat(variables('masterFqdnPrefix'), '-router')]" - } - }, - "sku": { - "name": "Basic" - } - }, - { - "name": "[variables('routerLBName')]", - "type": "Microsoft.Network/loadBalancers", - "apiVersion": "2017-10-01", - "location": "[variables('location')]", - "dependsOn": [ - "[concat('Microsoft.Network/publicIPAddresses/', variables('routerIPName'))]" - ], - "properties": { - "frontendIPConfigurations": [ - { - "name": "frontend", - "properties": { - "privateIPAllocationMethod": "Dynamic", - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('routerIPName'))]" - } - } - } - ], - "backendAddressPools": [ - { - "name": "backend" - } - ], - "loadBalancingRules": [ - { - "name": "port-80", - "properties": { - "frontendIPConfiguration": { - "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" - }, - "frontendPort": 80, - "backendPort": 80, - "enableFloatingIP": false, - "idleTimeoutInMinutes": 4, - "protocol": "Tcp", - "loadDistribution": "Default", - "backendAddressPool": { - "id": "[concat(variables('routerLBID'), '/backendAddressPools/backend')]" - }, - "probe": { - "id": "[concat(variables('routerLBID'), '/probes/port-80')]" - } - } - }, - { - "name": "port-443", - "properties": { - "frontendIPConfiguration": { - "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" - }, - "frontendPort": 443, - "backendPort": 443, - "enableFloatingIP": false, - "idleTimeoutInMinutes": 4, - "protocol": "Tcp", - "loadDistribution": "Default", - "backendAddressPool": { - "id": "[concat(variables('routerLBID'), '/backendAddressPools/backend')]" - }, - "probe": { - "id": "[concat(variables('routerLBID'), '/probes/port-443')]" - } - } - } - ], - "probes": [ - { - "name": "port-80", - "properties": { - "protocol": "Tcp", - "port": 80, - "intervalInSeconds": 5, - "numberOfProbes": 2 - } - }, - { - "name": "port-443", - "properties": { - "protocol": "Tcp", - "port": 443, - "intervalInSeconds": 5, - "numberOfProbes": 2 - } - } - ], - "inboundNatRules": [], - "outboundNatRules": [], - "inboundNatPools": [] - }, - "sku": { - "name": "Basic" - } - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('apiVersionStorage')]", - "name": "[concat(variables('storageAccountBaseName'), 'registry')]", - "location": "[variables('location')]", - "properties": { - "accountType": "Standard_LRS" - } - }, -{{end}} {{if .MasterProfile.IsManagedDisks}} { "apiVersion": "[variables('apiVersionStorageManagedDisks')]", diff --git 
a/parts/openshift/infraresources.t b/parts/openshift/infraresources.t new file mode 100644 index 0000000000..30d1c39e80 --- /dev/null +++ b/parts/openshift/infraresources.t @@ -0,0 +1,157 @@ + { + "type": "Microsoft.Network/networkSecurityGroups", + "apiVersion": "[variables('apiVersionDefault')]", + "location": "[variables('location')]", + "name": "[variables('routerNSGName')]", + "properties": { + "securityRules": [ + { + "name": "allow_http", + "properties": { + "access": "Allow", + "description": "Allow http traffic to infra nodes", + "destinationAddressPrefix": "*", + "destinationPortRange": "80", + "direction": "Inbound", + "priority": 110, + "protocol": "Tcp", + "sourceAddressPrefix": "*", + "sourcePortRange": "*" + } + }, + { + "name": "allow_https", + "properties": { + "access": "Allow", + "description": "Allow https traffic to infra nodes", + "destinationAddressPrefix": "*", + "destinationPortRange": "443", + "direction": "Inbound", + "priority": 111, + "protocol": "Tcp", + "sourceAddressPrefix": "*", + "sourcePortRange": "*" + } + } + ] + } + }, + { + "name": "[variables('routerIPName')]", + "type": "Microsoft.Network/publicIPAddresses", + "apiVersion": "2017-08-01", + "location": "[variables('location')]", + "properties": { + "publicIPAllocationMethod": "Static", + "dnsSettings": { + "domainNameLabel": "[concat(variables('masterFqdnPrefix'), '-router')]" + } + }, + "sku": { + "name": "Basic" + } + }, + { + "name": "[variables('routerLBName')]", + "type": "Microsoft.Network/loadBalancers", + "apiVersion": "2017-10-01", + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Network/publicIPAddresses/', variables('routerIPName'))]" + ], + "properties": { + "frontendIPConfigurations": [ + { + "name": "frontend", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('routerIPName'))]" + } + } + } + ], + "backendAddressPools": [ + { + "name": "backend" + } + ], + "loadBalancingRules": [ + { + "name": "port-80", + "properties": { + "frontendIPConfiguration": { + "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" + }, + "frontendPort": 80, + "backendPort": 80, + "enableFloatingIP": false, + "idleTimeoutInMinutes": 4, + "protocol": "Tcp", + "loadDistribution": "Default", + "backendAddressPool": { + "id": "[concat(variables('routerLBID'), '/backendAddressPools/backend')]" + }, + "probe": { + "id": "[concat(variables('routerLBID'), '/probes/port-80')]" + } + } + }, + { + "name": "port-443", + "properties": { + "frontendIPConfiguration": { + "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" + }, + "frontendPort": 443, + "backendPort": 443, + "enableFloatingIP": false, + "idleTimeoutInMinutes": 4, + "protocol": "Tcp", + "loadDistribution": "Default", + "backendAddressPool": { + "id": "[concat(variables('routerLBID'), '/backendAddressPools/backend')]" + }, + "probe": { + "id": "[concat(variables('routerLBID'), '/probes/port-443')]" + } + } + } + ], + "probes": [ + { + "name": "port-80", + "properties": { + "protocol": "Tcp", + "port": 80, + "intervalInSeconds": 5, + "numberOfProbes": 2 + } + }, + { + "name": "port-443", + "properties": { + "protocol": "Tcp", + "port": 443, + "intervalInSeconds": 5, + "numberOfProbes": 2 + } + } + ], + "inboundNatRules": [], + "outboundNatRules": [], + "inboundNatPools": [] + }, + "sku": { + "name": "Basic" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + 
"apiVersion": "[variables('apiVersionStorage')]", + "name": "[concat(variables('storageAccountBaseName'), 'registry')]", + "location": "[variables('location')]", + "properties": { + "accountType": "Standard_LRS" + } + }, \ No newline at end of file diff --git a/pkg/acsengine/const.go b/pkg/acsengine/const.go index 5ed6d8942e..b72557ec09 100644 --- a/pkg/acsengine/const.go +++ b/pkg/acsengine/const.go @@ -260,6 +260,7 @@ const ( kubernetesWinAgentVarsVMSS = "k8s/kuberneteswinagentresourcesvmss.t" masterOutputs = "masteroutputs.t" masterParams = "masterparams.t" + openshiftInfraResources = "openshift/infraresources.t" swarmBaseFile = "swarm/swarmbase.t" swarmParams = "swarm/swarmparams.t" swarmAgentResourcesVMAS = "swarm/swarmagentresourcesvmas.t" diff --git a/pkg/acsengine/engine.go b/pkg/acsengine/engine.go index 89b1cd265a..5edddb8268 100644 --- a/pkg/acsengine/engine.go +++ b/pkg/acsengine/engine.go @@ -32,6 +32,7 @@ var swarmTemplateFiles = []string{swarmBaseFile, swarmParams, swarmAgentResource var swarmModeTemplateFiles = []string{swarmBaseFile, swarmParams, swarmAgentResourcesVMAS, swarmAgentVars, swarmAgentResourcesVMSS, swarmAgentResourcesClassic, swarmBaseFile, swarmMasterResources, swarmMasterVars, swarmWinAgentResourcesVMAS, swarmWinAgentResourcesVMSS} var openshiftTemplateFiles = append( kubernetesTemplateFiles, + openshiftInfraResources, openshiftNodeScript, openshiftMasterScript, openshift39NodeScript, From acac8b0d2746f54e612dd5da6b19caacfb4ea807 Mon Sep 17 00:00:00 2001 From: Jim Minter Date: Thu, 21 Jun 2018 09:11:19 -0500 Subject: [PATCH 09/74] attempt to deal with waagent and docker racing (#3312) --- .../release-3.9/openshiftmasterscript.sh | 7 ----- .../release-3.9/openshiftnodescript.sh | 7 ----- .../unstable/openshiftmasterscript.sh | 29 +++++++++++++++++-- .../openshift/unstable/openshiftnodescript.sh | 29 +++++++++++++++++-- 4 files changed, 52 insertions(+), 20 deletions(-) diff --git a/parts/openshift/release-3.9/openshiftmasterscript.sh b/parts/openshift/release-3.9/openshiftmasterscript.sh index cacf656459..fcf0038c03 100644 --- a/parts/openshift/release-3.9/openshiftmasterscript.sh +++ b/parts/openshift/release-3.9/openshiftmasterscript.sh @@ -31,13 +31,6 @@ else COCKPIT_VERSION="latest" fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. -systemctl stop docker.service -restorecon -R /var/lib/docker -systemctl start docker.service - echo "BOOTSTRAP_CONFIG_NAME=node-config-master" >>/etc/sysconfig/${SERVICE_TYPE}-node for dst in tcp,2379 tcp,2380 tcp,8443 tcp,8444 tcp,8053 udp,8053 tcp,9090; do diff --git a/parts/openshift/release-3.9/openshiftnodescript.sh b/parts/openshift/release-3.9/openshiftnodescript.sh index af105ed702..b7b64e38d8 100644 --- a/parts/openshift/release-3.9/openshiftnodescript.sh +++ b/parts/openshift/release-3.9/openshiftnodescript.sh @@ -7,13 +7,6 @@ if [ -f "/etc/sysconfig/atomic-openshift-node" ]; then SERVICE_TYPE=atomic-openshift fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. 
-systemctl stop docker.service -restorecon -R /var/lib/docker -systemctl start docker.service - {{if eq .Role "infra"}} echo "BOOTSTRAP_CONFIG_NAME=node-config-infra" >>/etc/sysconfig/${SERVICE_TYPE}-node {{else}} diff --git a/parts/openshift/unstable/openshiftmasterscript.sh b/parts/openshift/unstable/openshiftmasterscript.sh index e0329b3384..fc4916bd62 100644 --- a/parts/openshift/unstable/openshiftmasterscript.sh +++ b/parts/openshift/unstable/openshiftmasterscript.sh @@ -32,10 +32,33 @@ else COCKPIT_VERSION="latest" fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. +if grep -q ^ResourceDisk.Filesystem=xfs /etc/waagent.conf; then + # Bad image: docker and waagent are racing. Try to fix up. Leave this code + # until the bad images have gone away. + set +e + + # stop docker if it hasn't failed already + systemctl stop docker.service + + # wait until waagent has run mkfs and mounted /var/lib/docker + while ! mountpoint -q /var/lib/docker; do + sleep 1 + done + + # now roll us back. /var/lib/docker/* may be mounted if docker lost the + # race. + umount /var/lib/docker + umount /var/lib/docker/* + + # disable waagent from racing again if we reboot. + sed -i -e '/^ResourceDisk.Format=/ s/=.*/=n/' /etc/waagent.conf + set -e +fi + systemctl stop docker.service +mkfs.xfs -f /dev/sdb1 +echo '/dev/sdb1 /var/lib/docker xfs grpquota 0 0' >>/etc/fstab +mount /var/lib/docker restorecon -R /var/lib/docker systemctl start docker.service diff --git a/parts/openshift/unstable/openshiftnodescript.sh b/parts/openshift/unstable/openshiftnodescript.sh index a42839397e..9a8ae3fddf 100644 --- a/parts/openshift/unstable/openshiftnodescript.sh +++ b/parts/openshift/unstable/openshiftnodescript.sh @@ -7,10 +7,33 @@ if [ -f "/etc/sysconfig/atomic-openshift-node" ]; then SERVICE_TYPE=atomic-openshift fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. +if grep -q ^ResourceDisk.Filesystem=xfs /etc/waagent.conf; then + # Bad image: docker and waagent are racing. Try to fix up. Leave this code + # until the bad images have gone away. + set +e + + # stop docker if it hasn't failed already + systemctl stop docker.service + + # wait until waagent has run mkfs and mounted /var/lib/docker + while ! mountpoint -q /var/lib/docker; do + sleep 1 + done + + # now roll us back. /var/lib/docker/* may be mounted if docker lost the + # race. + umount /var/lib/docker + umount /var/lib/docker/* + + # disable waagent from racing again if we reboot. 
+ sed -i -e '/^ResourceDisk.Format=/ s/=.*/=n/' /etc/waagent.conf + set -e +fi + systemctl stop docker.service +mkfs.xfs -f /dev/sdb1 +echo '/dev/sdb1 /var/lib/docker xfs grpquota 0 0' >>/etc/fstab +mount /var/lib/docker restorecon -R /var/lib/docker systemctl start docker.service From e65828346214dbf64e7520975f13a6c8a06691f9 Mon Sep 17 00:00:00 2001 From: Jim Minter Date: Thu, 21 Jun 2018 13:15:46 -0500 Subject: [PATCH 10/74] openshift: workaround other bad images with an additional umount (#3328) --- parts/openshift/unstable/openshiftmasterscript.sh | 2 ++ parts/openshift/unstable/openshiftnodescript.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/parts/openshift/unstable/openshiftmasterscript.sh b/parts/openshift/unstable/openshiftmasterscript.sh index fc4916bd62..9d7d3a4fec 100644 --- a/parts/openshift/unstable/openshiftmasterscript.sh +++ b/parts/openshift/unstable/openshiftmasterscript.sh @@ -56,6 +56,8 @@ if grep -q ^ResourceDisk.Filesystem=xfs /etc/waagent.conf; then fi systemctl stop docker.service +# Also a bad image: the umount should also go away. +umount /var/lib/docker || true mkfs.xfs -f /dev/sdb1 echo '/dev/sdb1 /var/lib/docker xfs grpquota 0 0' >>/etc/fstab mount /var/lib/docker diff --git a/parts/openshift/unstable/openshiftnodescript.sh b/parts/openshift/unstable/openshiftnodescript.sh index 9a8ae3fddf..47dcce76a4 100644 --- a/parts/openshift/unstable/openshiftnodescript.sh +++ b/parts/openshift/unstable/openshiftnodescript.sh @@ -31,6 +31,8 @@ if grep -q ^ResourceDisk.Filesystem=xfs /etc/waagent.conf; then fi systemctl stop docker.service +# Also a bad image: the umount should also go away. +umount /var/lib/docker || true mkfs.xfs -f /dev/sdb1 echo '/dev/sdb1 /var/lib/docker xfs grpquota 0 0' >>/etc/fstab mount /var/lib/docker From 186165b280cf0aee8beff1e784e70cace24a8070 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 21 Jun 2018 11:16:37 -0700 Subject: [PATCH 11/74] enable k8s v1.10.5 and v1.11.0-rc.1 (#3327) --- pkg/api/common/versions.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/api/common/versions.go b/pkg/api/common/versions.go index 224d221018..6aa593ad69 100644 --- a/pkg/api/common/versions.go +++ b/pkg/api/common/versions.go @@ -58,10 +58,12 @@ var AllKubernetesSupportedVersions = map[string]bool{ "1.10.2": true, "1.10.3": true, "1.10.4": true, + "1.10.5": true, "1.11.0-alpha.1": true, "1.11.0-alpha.2": true, "1.11.0-beta.1": true, "1.11.0-beta.2": true, + "1.11.0-rc.1": true, } // GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release From c92048da6614f96ea92c70e57f625c8653264b68 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Thu, 21 Jun 2018 12:09:02 -0700 Subject: [PATCH 12/74] Reduce docker pull timeout (#3330) --- parts/k8s/kubernetescustomscript.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index efd1d7b0fe..9e1534be62 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -369,7 +369,7 @@ function extractHyperkube(){ TMP_DIR=$(mktemp -d) retrycmd_if_failure 100 1 30 curl -sSL -o /usr/local/bin/img "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.4.6" chmod +x /usr/local/bin/img - retrycmd_if_failure 100 1 60 img pull $HYPERKUBE_URL || exit $ERR_K8S_DOWNLOAD_TIMEOUT + retrycmd_if_failure 75 1 60 img pull $HYPERKUBE_URL || exit $ERR_K8S_DOWNLOAD_TIMEOUT path=$(find /tmp/img -name "hyperkube") if [[ $OS == 
$COREOS_OS_NAME ]]; then From 6da34350dfddd3bfddfb9b7cbfb42e4ff240f843 Mon Sep 17 00:00:00 2001 From: Diego Casati Date: Thu, 21 Jun 2018 13:09:25 -0700 Subject: [PATCH 13/74] Update kubernetes.md (#3329) update broken link for the Kubernetes Examples --- docs/kubernetes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/kubernetes.md b/docs/kubernetes.md index 6d8bc7b6b6..954055aaa4 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -22,4 +22,4 @@ Here are recommended links to learn more about Kubernetes: 1. [Kubernetes Bootcamp](https://kubernetesbootcamp.github.io/kubernetes-bootcamp/index.html) - shows you how to deploy, scale, update, and debug containerized applications. 2. [Kubernetes Userguide](http://kubernetes.io/docs/user-guide/) - provides information on running programs in an existing Kubernetes cluster. -3. [Kubernetes Examples](https://github.com/kubernetes/kubernetes/tree/master/examples) - provides a number of examples on how to run real applications with Kubernetes. +3. [Kubernetes Examples](https://github.com/kubernetes/examples) - provides a number of examples on how to run real applications with Kubernetes. From d70808a7f888c79e9f23601df46db3577223deed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Serta=C3=A7=20=C3=96zercan?= Date: Thu, 21 Jun 2018 14:38:58 -0700 Subject: [PATCH 14/74] Update Cluster Autoscaler to v1.3 (#3320) * add CA v1.3 for k8s v1.11 * update tests * update tag --- docs/clusterdefinition.md | 603 ++++++++++--------- examples/addons/cluster-autoscaler/README.md | 6 +- pkg/acsengine/k8s_versions.go | 2 + pkg/acsengine/k8s_versions_test.go | 1 + 4 files changed, 314 insertions(+), 298 deletions(-) diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 29de840659..1dfd4181dc 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -6,70 +6,71 @@ Here are the cluster definitions for apiVersion "vlabs": ### apiVersion -|Name|Required|Description| -|---|---|---| -|apiVersion|yes|The version of the template. For "vlabs" the value is "vlabs"| +| Name | Required | Description | +| ---------- | -------- | ------------------------------------------------------------- | +| apiVersion | yes | The version of the template. For "vlabs" the value is "vlabs" | ### orchestratorProfile + `orchestratorProfile` describes the orchestrator settings. -|Name|Required|Description| -|---|---|---| -|orchestratorType|yes|Specifies the orchestrator type for the cluster| +| Name | Required | Description | +| ---------------- | -------- | ----------------------------------------------- | +| orchestratorType | yes | Specifies the orchestrator type for the cluster | Here are the valid values for the orchestrator types: -1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). [Older releases of DCOS 1.8 may be specified](../examples/dcos-releases). -2. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). -3. `Swarm` - this represents the [Swarm orchestrator](swarm.md). -4. `Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). -5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md). +1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). [Older releases of DCOS 1.8 may be specified](../examples/dcos-releases). +2. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). +3. `Swarm` - this represents the [Swarm orchestrator](swarm.md). +4. 
`Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). +5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md). ### kubernetesConfig `kubernetesConfig` describes Kubernetes specific configuration. -|Name|Required|Description| -|---|---|---| -|addons|no|Configure various Kubernetes addons configuration (currently supported: tiller, kubernetes-dashboard). See `addons` configuration below| -|apiServerConfig|no|Configure various runtime configuration for apiserver. See `apiServerConfig` [below](#feat-apiserver-config)| -|cloudControllerManagerConfig|no|Configure various runtime configuration for cloud-controller-manager. See `cloudControllerManagerConfig` [below](#feat-cloud-controller-manager-config)| -|clusterSubnet|no|The IP subnet used for allocating IP addresses for pod network interfaces. The subnet must be in the VNET address space. Default value is 10.244.0.0/16| -|containerRuntime|no|The container runtime to use as a backend. The default is `docker`. The other options are `clear-containers` and `containerd`| -|controllerManagerConfig|no|Configure various runtime configuration for controller-manager. See `controllerManagerConfig` [below](#feat-controller-manager-config)| -|customWindowsPackageURL|no|Configure custom windows Kubernetes release package URL for deployment on Windows| -|dnsServiceIP|no|IP address for kube-dns to listen on. If specified must be in the range of `serviceCidr`| -|dockerBridgeSubnet|no|The specific IP and subnet used for allocating IP addresses for the docker bridge network created on the kubernetes master and agents. Default value is 172.17.0.1/16. This value is used to configure the docker daemon using the [--bip flag](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0)| -|dockerEngineVersion|no|Which version of docker-engine to use in your cluster, e.g. "17.03.*"| -|enableAggregatedAPIs|no|Enable [Kubernetes Aggregated APIs](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/).This is required by [Service Catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md). (boolean - default == false) | -|enableDataEncryptionAtRest|no|Enable [kubernetes data encryption at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | -|enableEncryptionWithExternalKms|no|Enable [kubernetes data encryption at rest with external KMS](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | -|enablePodSecurityPolicy|no|Enable [kubernetes pod security policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).This is currently a beta feature. (boolean - default == false)| -|enableRbac|no|Enable [Kubernetes RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) (boolean - default == true) | -|etcdDiskSizeGB|no|Size in GB to assign to etcd data volume. Defaults (if no user value provided) are: 256 GB for clusters up to 3 nodes; 512 GB for clusters with between 4 and 10 nodes; 1024 GB for clusters with between 11 and 20 nodes; and 2048 GB for clusters with more than 20 nodes| -|etcdEncryptionKey|no|Enryption key to be used if enableDataEncryptionAtRest is enabled. Defaults to a random, generated, key| -|gcHighThreshold|no|Sets the --image-gc-high-threshold value on the kublet configuration. Default is 85. 
[See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | -|gcLowThreshold|no|Sets the --image-gc-low-threshold value on the kublet configuration. Default is 80. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | -|kubeletConfig|no|Configure various runtime configuration for kubelet. See `kubeletConfig` [below](#feat-kubelet-config)| -|kubernetesImageBase|no|Specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s-gcrio.azureedge.net/`| -|networkPlugin|no|Specifies the network plugin implementation for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience
`"kubenet"` for k8s software networking implementation.
`"flannel"` for using CoreOS Flannel
`"cilium"` for using the default Cilium CNI IPAM | -|networkPolicy|no|Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`calico` for Calico network policy.
`cilium` for cilium network policy (Linux only).
See [network policy examples](../examples/networkpolicy) for more information| -|privateCluster|no|Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster).| -|schedulerConfig|no|Configure various runtime configuration for scheduler. See `schedulerConfig` [below](#feat-scheduler-config)| -|serviceCidr|no|IP range for Service IPs, Default is "10.0.0.0/16". This range is never routed outside of a node so does not need to lie within clusterSubnet or the VNET| -|useInstanceMetadata|no|Use the Azure cloudprovider instance metadata service for appropriate resource discovery operations. Default is `true`| -|useManagedIdentity|no| Includes and uses MSI identities for all interactions with the Azure Resource Manager (ARM) API. Instead of using a static service principal written to /etc/kubernetes/azure.json, Kubernetes will use a dynamic, time-limited token fetched from the MSI extension running on master and agent nodes. This support is currently alpha and requires Kubernetes v1.9.1 or newer. (boolean - default == false) | +| Name | Required | Description | +| ------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| addons | no | Configure various Kubernetes addons configuration (currently supported: tiller, kubernetes-dashboard). See `addons` configuration below | +| apiServerConfig | no | Configure various runtime configuration for apiserver. See `apiServerConfig` [below](#feat-apiserver-config) | +| cloudControllerManagerConfig | no | Configure various runtime configuration for cloud-controller-manager. See `cloudControllerManagerConfig` [below](#feat-cloud-controller-manager-config) | +| clusterSubnet | no | The IP subnet used for allocating IP addresses for pod network interfaces. The subnet must be in the VNET address space. Default value is 10.244.0.0/16 | +| containerRuntime | no | The container runtime to use as a backend. The default is `docker`. The other options are `clear-containers` and `containerd` | +| controllerManagerConfig | no | Configure various runtime configuration for controller-manager. See `controllerManagerConfig` [below](#feat-controller-manager-config) | +| customWindowsPackageURL | no | Configure custom windows Kubernetes release package URL for deployment on Windows | +| dnsServiceIP | no | IP address for kube-dns to listen on. If specified must be in the range of `serviceCidr` | +| dockerBridgeSubnet | no | The specific IP and subnet used for allocating IP addresses for the docker bridge network created on the kubernetes master and agents. Default value is 172.17.0.1/16. This value is used to configure the docker daemon using the [--bip flag](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0) | +| dockerEngineVersion | no | Which version of docker-engine to use in your cluster, e.g. "17.03.\*" | +| enableAggregatedAPIs | no | Enable [Kubernetes Aggregated APIs](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/).This is required by [Service Catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md). 
(boolean - default == false) | +| enableDataEncryptionAtRest | no | Enable [kubernetes data encryption at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | +| enableEncryptionWithExternalKms | no | Enable [kubernetes data encryption at rest with external KMS](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | +| enablePodSecurityPolicy | no | Enable [kubernetes pod security policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).This is currently a beta feature. (boolean - default == false) | +| enableRbac | no | Enable [Kubernetes RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) (boolean - default == true) | +| etcdDiskSizeGB | no | Size in GB to assign to etcd data volume. Defaults (if no user value provided) are: 256 GB for clusters up to 3 nodes; 512 GB for clusters with between 4 and 10 nodes; 1024 GB for clusters with between 11 and 20 nodes; and 2048 GB for clusters with more than 20 nodes | +| etcdEncryptionKey | no | Enryption key to be used if enableDataEncryptionAtRest is enabled. Defaults to a random, generated, key | +| gcHighThreshold | no | Sets the --image-gc-high-threshold value on the kublet configuration. Default is 85. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | +| gcLowThreshold | no | Sets the --image-gc-low-threshold value on the kublet configuration. Default is 80. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | +| kubeletConfig | no | Configure various runtime configuration for kubelet. See `kubeletConfig` [below](#feat-kubelet-config) | +| kubernetesImageBase | no | Specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s-gcrio.azureedge.net/` | +| networkPlugin | no | Specifies the network plugin implementation for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience
`"kubenet"` for k8s software networking implementation.
`"flannel"` for using CoreOS Flannel
`"cilium"` for using the default Cilium CNI IPAM | +| networkPolicy | no | Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`calico` for Calico network policy.
`cilium` for cilium network policy (Linux only).
See [network policy examples](../examples/networkpolicy) for more information | +| privateCluster | no | Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster). | +| schedulerConfig | no | Configure various runtime configuration for scheduler. See `schedulerConfig` [below](#feat-scheduler-config) | +| serviceCidr | no | IP range for Service IPs, Default is "10.0.0.0/16". This range is never routed outside of a node so does not need to lie within clusterSubnet or the VNET | +| useInstanceMetadata | no | Use the Azure cloudprovider instance metadata service for appropriate resource discovery operations. Default is `true` | +| useManagedIdentity | no | Includes and uses MSI identities for all interactions with the Azure Resource Manager (ARM) API. Instead of using a static service principal written to /etc/kubernetes/azure.json, Kubernetes will use a dynamic, time-limited token fetched from the MSI extension running on master and agent nodes. This support is currently alpha and requires Kubernetes v1.9.1 or newer. (boolean - default == false) | #### addons `addons` describes various addons configuration. It is a child property of `kubernetesConfig`. Below is a list of currently available addons: -|Name of addon|Enabled by default?|How many containers|Description| -|---|---|---|---| -|tiller|true|1|Delivers the Helm server-side component: tiller. See https://github.com/kubernetes/helm for more info| -|kubernetes-dashboard|true|1|Delivers the kubernetes dashboard component. See https://github.com/kubernetes/dashboard for more info| -|rescheduler|false|1|Delivers the kubernetes rescheduler component| -|cluster-autoscaler|false|1|Delivers the kubernetes cluster autoscaler component. See https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure for more info| +| Name of addon | Enabled by default? | How many containers | Description | +| --------------------------------------------------------------------- | ------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| tiller | true | 1 | Delivers the Helm server-side component: tiller. See https://github.com/kubernetes/helm for more info | +| kubernetes-dashboard | true | 1 | Delivers the kubernetes dashboard component. See https://github.com/kubernetes/dashboard for more info | +| rescheduler | false | 1 | Delivers the kubernetes rescheduler component | +| [cluster-autoscaler](../examples/addons/cluster-autoscaler/README.md) | false | 1 | Delivers the kubernetes cluster autoscaler component. See https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure for more info | To give a bit more info on the `addons` property: We've tried to expose the basic bits of data that allow useful configuration of these cluster features. Here are some example usage patterns that will unpack what `addons` provide: @@ -167,6 +168,7 @@ Additionally above, we specified a custom docker image for tiller, let's say we Finally, the `addons.enabled` boolean property was omitted above; that's by design. If you specify a `containers` configuration, acs-engine assumes you're enabling the addon. The very first example above demonstrates a simple "enable this addon with default configuration" declaration. 
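As with the addons above, a minimal sketch for enabling the newly listed cluster-autoscaler addon could look like the following. The addon name and the `name`/`enabled` pattern come from the examples above; any scaling limits or container resource tuning are not shown here and should be taken from the cluster-autoscaler README linked in the addons table:

```json
"kubernetesConfig": {
    "addons": [
        {
            "name": "cluster-autoscaler",
            "enabled": true
        }
    ]
}
```

Omitting `containers` here relies on the default resource configuration described above; as noted earlier, supplying a `containers` configuration for this addon would implicitly enable it as well.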
+ #### kubeletConfig `kubeletConfig` declares runtime configuration for the kubelet running on all master and agent nodes. It is a generic key/value object, and a child property of `kubernetesConfig`. An example custom kubelet config: @@ -183,40 +185,41 @@ See [here](https://kubernetes.io/docs/reference/generated/kubelet/) for a refere Below is a list of kubelet options that acs-engine will configure by default: -|kubelet option|default value| -|---|---| -|"--cloud-config"|"/etc/kubernetes/azure.json"| -|"--cloud-provider"|"azure"| -|"--cluster-domain"|"cluster.local"| -|"--pod-infra-container-image"|"pause-amd64:*version*"| -|"--max-pods"|"30", or "110" if using kubenet --network-plugin (i.e., `"networkPlugin": "kubenet"`)| -|"--eviction-hard"|"memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%"| -|"--node-status-update-frequency"|"10s"| -|"--image-gc-high-threshold"|"85"| -|"--image-gc-low-threshold"|"850"| -|"--non-masquerade-cidr"|"10.0.0.0/8"| -|"--azure-container-registry-config"|"/etc/kubernetes/azure.json"| -|"--pod-max-pids"|"100" (need to activate the feature in --feature-gates=SupportPodPidsLimit=true)| -|"--image-pull-progress-deadline"|"30m"| -|"--feature-gates"|No default (can be a comma-separated list). On agent nodes `Accelerators=true` will be applied in the `--feature-gates` option for k8s versions before 1.11.0| - -Below is a list of kubelet options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces kubelet configuration, or because a static configuration is required to build a functional cluster: - -|kubelet option|default value| -|---|---| -|"--address"|"0.0.0.0"| -|"--allow-privileged"|"true"| -|"--pod-manifest-path"|"/etc/kubernetes/manifests"| -|"--network-plugin"|"cni"| -|"--node-labels"|(based on Azure node metadata)| -|"--cgroups-per-qos"|"true"| -|"--enforce-node-allocatable"|"pods"| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--register-node" (master nodes only)|"true"| -|"--register-with-taints" (master nodes only)|"node-role.kubernetes.io/master=true:NoSchedule"| -|"--keep-terminated-pod-volumes"|"false"| +| kubelet option | default value | +| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| "--cloud-config" | "/etc/kubernetes/azure.json" | +| "--cloud-provider" | "azure" | +| "--cluster-domain" | "cluster.local" | +| "--pod-infra-container-image" | "pause-amd64:_version_" | +| "--max-pods" | "30", or "110" if using kubenet --network-plugin (i.e., `"networkPlugin": "kubenet"`) | +| "--eviction-hard" | "memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%" | +| "--node-status-update-frequency" | "10s" | +| "--image-gc-high-threshold" | "85" | +| "--image-gc-low-threshold" | "850" | +| "--non-masquerade-cidr" | "10.0.0.0/8" | +| "--azure-container-registry-config" | "/etc/kubernetes/azure.json" | +| "--pod-max-pids" | "100" (need to activate the feature in --feature-gates=SupportPodPidsLimit=true) | +| "--image-pull-progress-deadline" | "30m" | +| "--feature-gates" | No default (can be a comma-separated list). 
On agent nodes `Accelerators=true` will be applied in the `--feature-gates` option for k8s versions before 1.11.0 | + +Below is a list of kubelet options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces kubelet configuration, or because a static configuration is required to build a functional cluster: + +| kubelet option | default value | +| -------------------------------------------- | ------------------------------------------------ | +| "--address" | "0.0.0.0" | +| "--allow-privileged" | "true" | +| "--pod-manifest-path" | "/etc/kubernetes/manifests" | +| "--network-plugin" | "cni" | +| "--node-labels" | (based on Azure node metadata) | +| "--cgroups-per-qos" | "true" | +| "--enforce-node-allocatable" | "pods" | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--register-node" (master nodes only) | "true" | +| "--register-with-taints" (master nodes only) | "node-role.kubernetes.io/master=true:NoSchedule" | +| "--keep-terminated-pod-volumes" | "false" | + #### controllerManagerConfig `controllerManagerConfig` declares runtime configuration for the kube-controller-manager daemon running on all master nodes. Like `kubeletConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. An example custom controller-manager config: @@ -236,35 +239,35 @@ See [here](https://kubernetes.io/docs/reference/generated/kube-controller-manage Below is a list of controller-manager options that acs-engine will configure by default: -|controller-manager option|default value| -|---|---| -|"--node-monitor-grace-period"|"40s"| -|"--pod-eviction-timeout"|"5m0s"| -|"--route-reconciliation-period"|"10s"| -|"--terminated-pod-gc-threshold"|"5000"| -|"--feature-gates"|No default (can be a comma-separated list)| - - -Below is a list of controller-manager options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: - -|controller-manager option|default value| -|---|---| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--allocate-node-cidrs"|"false"| -|"--cluster-cidr"|"10.240.0.0/12"| -|"--cluster-name"|*auto-generated using api model properties*| -|"--cloud-provider"|"azure"| -|"--cloud-config"|"/etc/kubernetes/azure.json"| -|"--root-ca-file"|"/etc/kubernetes/certs/ca.crt"| -|"--cluster-signing-cert-file"|"/etc/kubernetes/certs/ca.crt"| -|"--cluster-signing-key-file"|"/etc/kubernetes/certs/ca.key"| -|"--service-account-private-key-file"|"/etc/kubernetes/certs/apiserver.key"| -|"--leader-elect"|"true"| -|"--v"|"2"| -|"--profiling"|"false"| -|"--use-service-account-credentials"|"false" ("true" if kubernetesConfig.enableRbac is true)| +| controller-manager option | default value | +| ------------------------------- | ------------------------------------------ | +| "--node-monitor-grace-period" | "40s" | +| "--pod-eviction-timeout" | "5m0s" | +| "--route-reconciliation-period" | "10s" | +| "--terminated-pod-gc-threshold" | "5000" | +| "--feature-gates" | No default (can be a comma-separated list) | + +Below is a list of controller-manager options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: + +| controller-manager option | default value | +| 
------------------------------------ | ------------------------------------------------------- | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--allocate-node-cidrs" | "false" | +| "--cluster-cidr" | "10.240.0.0/12" | +| "--cluster-name" | _auto-generated using api model properties_ | +| "--cloud-provider" | "azure" | +| "--cloud-config" | "/etc/kubernetes/azure.json" | +| "--root-ca-file" | "/etc/kubernetes/certs/ca.crt" | +| "--cluster-signing-cert-file" | "/etc/kubernetes/certs/ca.crt" | +| "--cluster-signing-key-file" | "/etc/kubernetes/certs/ca.key" | +| "--service-account-private-key-file" | "/etc/kubernetes/certs/apiserver.key" | +| "--leader-elect" | "true" | +| "--v" | "2" | +| "--profiling" | "false" | +| "--use-service-account-credentials" | "false" ("true" if kubernetesConfig.enableRbac is true) | + #### cloudControllerManagerConfig `cloudControllerManagerConfig` declares runtime configuration for the cloud-controller-manager daemon running on all master nodes in a Cloud Controller Manager configuration. Like `kubeletConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. An example custom cloud-controller-manager config: @@ -281,25 +284,25 @@ See [here](https://kubernetes.io/docs/reference/generated/cloud-controller-manag Below is a list of cloud-controller-manager options that acs-engine will configure by default: -|controller-manager option|default value| -|---|---| -|"--route-reconciliation-period"|"10s"| - +| controller-manager option | default value | +| ------------------------------- | ------------- | +| "--route-reconciliation-period" | "10s" | -Below is a list of cloud-controller-manager options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: +Below is a list of cloud-controller-manager options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: -|controller-manager option|default value| -|---|---| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--allocate-node-cidrs"|"false"| -|"--cluster-cidr"|"10.240.0.0/12"| -|"--cluster-name"|*auto-generated using api model properties*| -|"--cloud-provider"|"azure"| -|"--cloud-config"|"/etc/kubernetes/azure.json"| -|"--leader-elect"|"true"| -|"--v"|"2"| +| controller-manager option | default value | +| ------------------------- | ------------------------------------------- | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--allocate-node-cidrs" | "false" | +| "--cluster-cidr" | "10.240.0.0/12" | +| "--cluster-name" | _auto-generated using api model properties_ | +| "--cloud-provider" | "azure" | +| "--cloud-config" | "/etc/kubernetes/azure.json" | +| "--leader-elect" | "true" | +| "--v" | "2" | + #### apiServerConfig `apiServerConfig` declares runtime configuration for the kube-apiserver daemon running on all master nodes. Like `kubeletConfig` and `controllerManagerConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. 
An example custom apiserver config: @@ -311,6 +314,7 @@ Below is a list of cloud-controller-manager options that are *not* currently use } } ``` + Or perhaps you want to customize/override the set of admission-control flags passed to the API Server by default, you can omit the options you don't want and specify only the ones you need as follows: ``` @@ -329,63 +333,63 @@ See [here](https://kubernetes.io/docs/reference/generated/kube-apiserver/) for a Below is a list of apiserver options that acs-engine will configure by default: -|apiserver option|default value| -|---|---| -|"--admission-control"|"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions prior to 1.9.0| -|"--enable-admission-plugins"`*`|"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions 1.9.0 and later| -|"--authorization-mode"|"Node", "RBAC" (*the latter if enabledRbac is true*)| -|"--audit-log-maxage"|"30"| -|"--audit-log-maxbackup"|"10"| -|"--audit-log-maxsize"|"100"| -|"--feature-gates"|No default (can be a comma-separated list)| -|"--oidc-username-claim"|"oid" (*if has AADProfile*)| -|"--oidc-groups-claim"|"groups" (*if has AADProfile*)| -|"--oidc-client-id"|*calculated value that represents OID client ID* (*if has AADProfile*)| -|"--oidc-issuer-url"|*calculated value that represents OID issuer URL* (*if has AADProfile*)| +| apiserver option | default value | +| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| "--admission-control" | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions prior to 1.9.0 | +| "--enable-admission-plugins"`*` | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions 1.9.0 and later | +| "--authorization-mode" | "Node", "RBAC" (_the latter if enabledRbac is true_) | +| "--audit-log-maxage" | "30" | +| "--audit-log-maxbackup" | "10" | +| "--audit-log-maxsize" | "100" | +| "--feature-gates" | No default (can be a comma-separated list) | +| "--oidc-username-claim" | "oid" (_if has AADProfile_) | +| "--oidc-groups-claim" | "groups" (_if has AADProfile_) | +| "--oidc-client-id" | _calculated value that represents OID client ID_ (_if has AADProfile_) | +| "--oidc-issuer-url" | _calculated value that represents OID issuer URL_ (_if has AADProfile_) | `*` In Kubernetes versions 1.10.0 and later the `--admission-control` flag is deprecated and `--enable-admission-plugins` is used in its stead. 
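Overriding one of the defaults listed above uses the same pattern as the earlier admission-control example; here is a minimal sketch that adjusts the audit-log retention flags (the particular values are illustrative assumptions only):

```
"kubernetesConfig": {
    "apiServerConfig": {
        "--audit-log-maxage": "7",
        "--audit-log-maxbackup": "5",
        "--audit-log-maxsize": "50"
    }
}
```

Any flag not overridden this way keeps the default shown in the table.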
- -Below is a list of apiserver options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces apiserver configuration, or because a static configuration is required to build a functional cluster: - -|apiserver option|default value| -|---|---| -|"--bind-address"|"0.0.0.0"| -|"--advertise-address"|*calculated value that represents listening URI for API server*| -|"--allow-privileged"|"true"| -|"--anonymous-auth"|"false| -|"--audit-log-path"|"/var/log/apiserver/audit.log"| -|"--insecure-port"|"8080"| -|"--secure-port"|"443"| -|"--service-account-lookup"|"true"| -|"--etcd-cafile"|"/etc/kubernetes/certs/ca.crt"| -|"--etcd-certfile"|"/etc/kubernetes/certs/etcdclient.crt"| -|"--etcd-keyfile"|"/etc/kubernetes/certs/etcdclient.key"| -|"--etcd-servers"|*calculated value that represents etcd servers*| -|"--profiling"|"false"| -|"--repair-malformed-updates"|"false"| -|"--tls-cert-file"|"/etc/kubernetes/certs/apiserver.crt"| -|"--tls-private-key-file"|"/etc/kubernetes/certs/apiserver.key"| -|"--client-ca-file"|"/etc/kubernetes/certs/ca.crt"| -|"--service-account-key-file"|"/etc/kubernetes/certs/apiserver.key"| -|"--kubelet-client-certificate"|"/etc/kubernetes/certs/client.crt"| -|"--kubelet-client-key"|"/etc/kubernetes/certs/client.key"| -|"--service-cluster-ip-range"|*see serviceCIDR*| -|"--storage-backend"|*calculated value that represents etcd version*| -|"--v"|"4"| -|"--experimental-encryption-provider-config"|"/etc/kubernetes/encryption-config.yaml" (*if enableDataEncryptionAtRest is true*)| -|"--experimental-encryption-provider-config"|"/etc/kubernetes/encryption-config.yaml" (*if enableEncryptionWithExternalKms is true*)| -|"--requestheader-client-ca-file"|"/etc/kubernetes/certs/proxy-ca.crt" (*if enableAggregatedAPIs is true*)| -|"--proxy-client-cert-file"|"/etc/kubernetes/certs/proxy.crt" (*if enableAggregatedAPIs is true*)| -|"--proxy-client-key-file"|"/etc/kubernetes/certs/proxy.key" (*if enableAggregatedAPIs is true*)| -|"--requestheader-allowed-names"|"" (*if enableAggregatedAPIs is true*)| -|"--requestheader-extra-headers-prefix"|"X-Remote-Extra-" (*if enableAggregatedAPIs is true*)| -|"--requestheader-group-headers"|"X-Remote-Group" (*if enableAggregatedAPIs is true*)| -|"--requestheader-username-headers"|"X-Remote-User" (*if enableAggregatedAPIs is true*)| -|"--cloud-provider"|"azure" (*unless useCloudControllerManager is true*)| -|"--cloud-config"|"/etc/kubernetes/azure.json" (*unless useCloudControllerManager is true*)| +Below is a list of apiserver options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces apiserver configuration, or because a static configuration is required to build a functional cluster: + +| apiserver option | default value | +| ------------------------------------------- | --------------------------------------------------------------------------------------- | +| "--bind-address" | "0.0.0.0" | +| "--advertise-address" | _calculated value that represents listening URI for API server_ | +| "--allow-privileged" | "true" | +| "--anonymous-auth" | "false | +| "--audit-log-path" | "/var/log/apiserver/audit.log" | +| "--insecure-port" | "8080" | +| "--secure-port" | "443" | +| "--service-account-lookup" | "true" | +| "--etcd-cafile" | "/etc/kubernetes/certs/ca.crt" | +| "--etcd-certfile" | "/etc/kubernetes/certs/etcdclient.crt" | +| "--etcd-keyfile" | "/etc/kubernetes/certs/etcdclient.key" | +| "--etcd-servers" | _calculated value 
that represents etcd servers_ | +| "--profiling" | "false" | +| "--repair-malformed-updates" | "false" | +| "--tls-cert-file" | "/etc/kubernetes/certs/apiserver.crt" | +| "--tls-private-key-file" | "/etc/kubernetes/certs/apiserver.key" | +| "--client-ca-file" | "/etc/kubernetes/certs/ca.crt" | +| "--service-account-key-file" | "/etc/kubernetes/certs/apiserver.key" | +| "--kubelet-client-certificate" | "/etc/kubernetes/certs/client.crt" | +| "--kubelet-client-key" | "/etc/kubernetes/certs/client.key" | +| "--service-cluster-ip-range" | _see serviceCIDR_ | +| "--storage-backend" | _calculated value that represents etcd version_ | +| "--v" | "4" | +| "--experimental-encryption-provider-config" | "/etc/kubernetes/encryption-config.yaml" (_if enableDataEncryptionAtRest is true_) | +| "--experimental-encryption-provider-config" | "/etc/kubernetes/encryption-config.yaml" (_if enableEncryptionWithExternalKms is true_) | +| "--requestheader-client-ca-file" | "/etc/kubernetes/certs/proxy-ca.crt" (_if enableAggregatedAPIs is true_) | +| "--proxy-client-cert-file" | "/etc/kubernetes/certs/proxy.crt" (_if enableAggregatedAPIs is true_) | +| "--proxy-client-key-file" | "/etc/kubernetes/certs/proxy.key" (_if enableAggregatedAPIs is true_) | +| "--requestheader-allowed-names" | "" (_if enableAggregatedAPIs is true_) | +| "--requestheader-extra-headers-prefix" | "X-Remote-Extra-" (_if enableAggregatedAPIs is true_) | +| "--requestheader-group-headers" | "X-Remote-Group" (_if enableAggregatedAPIs is true_) | +| "--requestheader-username-headers" | "X-Remote-User" (_if enableAggregatedAPIs is true_) | +| "--cloud-provider" | "azure" (_unless useCloudControllerManager is true_) | +| "--cloud-config" | "/etc/kubernetes/azure.json" (_unless useCloudControllerManager is true_) | + #### schedulerConfig `schedulerConfig` declares runtime configuration for the kube-scheduler daemon running on all master nodes. Like `kubeletConfig`, `controllerManagerConfig`, and `apiServerConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. 
An example custom apiserver config: @@ -402,115 +406,119 @@ See [here](https://kubernetes.io/docs/reference/generated/kube-scheduler/) for a Below is a list of scheduler options that acs-engine will configure by default: -|kube-scheduler option|default value| -|---|---| -|"--v"|"2"| -|"--feature-gates"|No default (can be a comma-separated list)| +| kube-scheduler option | default value | +| --------------------- | ------------------------------------------ | +| "--v" | "2" | +| "--feature-gates" | No default (can be a comma-separated list) | +Below is a list of kube-scheduler options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces kube-scheduler configuration, or because a static configuration is required to build a functional cluster: -Below is a list of kube-scheduler options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces kube-scheduler configuration, or because a static configuration is required to build a functional cluster: - -|kube-scheduler option|default value| -|---|---| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--leader-elect"|"true"| -|"--profiling"|"false"| +| kube-scheduler option | default value | +| --------------------- | ----------------------------- | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--leader-elect" | "true" | +| "--profiling" | "false" | We consider `kubeletConfig`, `controllerManagerConfig`, `apiServerConfig`, and `schedulerConfig` to be generic conveniences that add power/flexibility to cluster deployments. Their usage comes with no operational guarantees! They are manual tuning features that enable low-level configuration of a kubernetes cluster. + #### privateCluster `privateCluster` defines a cluster without public addresses assigned. It is a child property of `kubernetesConfig`. -|Name|Required|Description| -|---|---|---| -|enabled|no|Enable [Private Cluster](./kubernetes/features.md/#feat-private-cluster) (boolean - default == false) | -|jumpboxProfile|no|Configure and auto-provision a jumpbox to access your private cluster. `jumpboxProfile` is ignored if enabled is `false`. See `jumpboxProfile` below| +| Name | Required | Description | +| -------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | +| enabled | no | Enable [Private Cluster](./kubernetes/features.md/#feat-private-cluster) (boolean - default == false) | +| jumpboxProfile | no | Configure and auto-provision a jumpbox to access your private cluster. `jumpboxProfile` is ignored if enabled is `false`. See `jumpboxProfile` below | #### jumpboxProfile `jumpboxProfile` describes the settings for a jumpbox deployed via acs-engine to access a private cluster. It is a child property of `privateCluster`. -|Name|Required|Description| -|---|---|---| -|name|yes|This is the unique name for the jumpbox VM. Some resources deployed with the jumpbox are derived from this name| -|vmSize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/)| -|publicKey|yes|The public SSH key used for authenticating access to the jumpbox. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation)| -|osDiskSizeGB|no|Describes the OS Disk Size in GB. Defaults to `30`| -|storageProfile|no|Specifies the storage profile to use. 
Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks`| -|username|no|Describes the admin username to be used on the jumpbox. Defaults to `azureuser`| +| Name | Required | Description | +| -------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| name | yes | This is the unique name for the jumpbox VM. Some resources deployed with the jumpbox are derived from this name | +| vmSize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/) | +| publicKey | yes | The public SSH key used for authenticating access to the jumpbox. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation) | +| osDiskSizeGB | no | Describes the OS Disk Size in GB. Defaults to `30` | +| storageProfile | no | Specifies the storage profile to use. Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks` | +| username | no | Describes the admin username to be used on the jumpbox. Defaults to `azureuser` | ### masterProfile + `masterProfile` describes the settings for master configuration. -|Name|Required|Description| -|---|---|---| -|count|yes|Masters have count value of 1, 3, or 5 masters| -|dnsPrefix|yes|The dns prefix for the master FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet))| -|subjectAltNames|no|An array of fully qualified domain names using which a user can reach API server. These domains are added as Subject Alternative Names to the generated API server certificate. **NOTE**: These domains **will not** be automatically provisioned.| -|firstConsecutiveStaticIP|only required when vnetSubnetId specified|The IP address of the first master. IP Addresses will be assigned consecutively to additional master nodes| -|vmsize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores and 100GB of ephemeral disk space| -|osDiskSizeGB|no|Describes the OS Disk Size in GB| -|vnetSubnetId|no|Specifies the Id of an alternate VNET subnet. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet))| -|extensions|no|This is an array of extensions. This indicates that the extension be run on a single master. The name in the extensions array must exactly match the extension name in the extensionProfiles| -|vnetCidr|no|Specifies the VNET cidr when using a custom VNET ([bring your own VNET examples](../examples/vnet))| -|imageReference.name|no|The name of the Linux OS image. Needs to be used in conjunction with resourceGroup, below| -|imageReference.resourceGroup|no|Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above| -|distro|no|Select Master(s) Operating System (Linux only). Currently supported values are: `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined. Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. 
[Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json)| -|customFiles|no|The custom files to be provisioned to the master nodes. Defined as an array of json objects with each defined as `"source":"absolute-local-path", "dest":"absolute-path-on-masternodes"`.[See examples](../examples/customfiles) | +| Name | Required | Description | +| ---------------------------- | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| count | yes | Masters have count value of 1, 3, or 5 masters | +| dnsPrefix | yes | The dns prefix for the master FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet)) | +| subjectAltNames | no | An array of fully qualified domain names using which a user can reach API server. These domains are added as Subject Alternative Names to the generated API server certificate. **NOTE**: These domains **will not** be automatically provisioned. | +| firstConsecutiveStaticIP | only required when vnetSubnetId specified | The IP address of the first master. IP Addresses will be assigned consecutively to additional master nodes | +| vmsize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores and 100GB of ephemeral disk space | +| osDiskSizeGB | no | Describes the OS Disk Size in GB | +| vnetSubnetId | no | Specifies the Id of an alternate VNET subnet. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet)) | +| extensions | no | This is an array of extensions. This indicates that the extension be run on a single master. The name in the extensions array must exactly match the extension name in the extensionProfiles | +| vnetCidr | no | Specifies the VNET cidr when using a custom VNET ([bring your own VNET examples](../examples/vnet)) | +| imageReference.name | no | The name of the Linux OS image. Needs to be used in conjunction with resourceGroup, below | +| imageReference.resourceGroup | no | Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above | +| distro | no | Select Master(s) Operating System (Linux only). Currently supported values are: `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined. Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json) | +| customFiles | no | The custom files to be provisioned to the master nodes. Defined as an array of json objects with each defined as `"source":"absolute-local-path", "dest":"absolute-path-on-masternodes"`.[See examples](../examples/customfiles) | ### agentPoolProfiles + A cluster can have 0 to 12 agent pool profiles. 
Agent Pool Profiles are used for creating agents with different capabilities such as VMSizes, VMSS or Availability Set, Public/Private access, user-defined OS Images, [attached storage disks](../examples/disks-storageaccount), [attached managed disks](../examples/disks-managed), or [Windows](../examples/windows). -|Name|Required|Description| -|---|---|---| -|availabilityProfile|no|Supported values are `VirtualMachineScaleSets` (default, except for Kubernetes clusters before version 1.10) and `AvailabilitySet`.| -|count|yes|Describes the node count| -|scaleSetPriority|no|Supported values are `Regular` (default) and `Low`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. Enables the usage of [Low-priority VMs on Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-use-low-priority).| -|scaleSetEvictionPolicy|no|Supported values are `Delete` (default) and `Deallocate`. Only applies to clusters with availabilityProfile of `VirtualMachineScaleSets` and scaleSetPriority of `Low`.| -|diskSizesGB|no|Describes an array of up to 4 attached disk sizes. Valid disk size values are between 1 and 1024| -|dnsPrefix|Required if agents are to be exposed publically with a load balancer|The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools. Not supported for Kubernetes clusters| -|name|yes|This is the unique name for the agent pool profile. The resources of the agent pool profile are derived from this name| -|ports|only required if needed for exposing services publically|Describes an array of ports need for exposing publically. A tcp probe is configured for each port and only opens to an agent node if the agent node is listening on that port. A maximum of 150 ports may be specified. Not supported for Kubernetes clusters| -|storageProfile|no|Specifies the storage profile to use. Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks`| -|vmsize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores| -|osDiskSizeGB|no|Describes the OS Disk Size in GB| -|vnetSubnetId|no|Specifies the Id of an alternate VNET subnet. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet))| -|imageReference.name|no|The name of a a Linux OS image. Needs to be used in conjunction with resourceGroup, below| -|imageReference.resourceGroup|no|Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above| -|osType|no|Specifies the agent pool's Operating System. Supported values are `Windows` and `Linux`. Defaults to `Linux`| -|distro|no|Specifies the agent pool's Linux distribution. Supported values are `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined, unless `osType` is defined as `Windows` (in which case `distro` is unused). Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. 
[Example of CoreOS Master with Windows and Linux (CoreOS and Ubuntu) Agents](../examples/coreos/kubernetes-coreos-hybrid.json) | -|acceleratedNetworkingEnabled|no|Use [Azure Accelerated Networking](https://azure.microsoft.com/en-us/blog/maximize-your-vm-s-performance-with-accelerated-networking-now-generally-available-for-both-windows-and-linux/) feature for agents (You must select a VM SKU that support Accelerated Networking)| +| Name | Required | Description | +| ---------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| availabilityProfile | no | Supported values are `VirtualMachineScaleSets` (default, except for Kubernetes clusters before version 1.10) and `AvailabilitySet`. | +| count | yes | Describes the node count | +| scaleSetPriority | no | Supported values are `Regular` (default) and `Low`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. Enables the usage of [Low-priority VMs on Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-use-low-priority). | +| scaleSetEvictionPolicy | no | Supported values are `Delete` (default) and `Deallocate`. Only applies to clusters with availabilityProfile of `VirtualMachineScaleSets` and scaleSetPriority of `Low`. | +| diskSizesGB | no | Describes an array of up to 4 attached disk sizes. Valid disk size values are between 1 and 1024 | +| dnsPrefix | Required if agents are to be exposed publically with a load balancer | The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools. Not supported for Kubernetes clusters | +| name | yes | This is the unique name for the agent pool profile. The resources of the agent pool profile are derived from this name | +| ports | only required if needed for exposing services publically | Describes an array of ports need for exposing publically. A tcp probe is configured for each port and only opens to an agent node if the agent node is listening on that port. A maximum of 150 ports may be specified. Not supported for Kubernetes clusters | +| storageProfile | no | Specifies the storage profile to use. Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks` | +| vmsize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores | +| osDiskSizeGB | no | Describes the OS Disk Size in GB | +| vnetSubnetId | no | Specifies the Id of an alternate VNET subnet. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet)) | +| imageReference.name | no | The name of a a Linux OS image. Needs to be used in conjunction with resourceGroup, below | +| imageReference.resourceGroup | no | Resource group that contains the Linux OS image. 
Needs to be used in conjunction with name, above | +| osType | no | Specifies the agent pool's Operating System. Supported values are `Windows` and `Linux`. Defaults to `Linux` | +| distro | no | Specifies the agent pool's Linux distribution. Supported values are `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined, unless `osType` is defined as `Windows` (in which case `distro` is unused). Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. [Example of CoreOS Master with Windows and Linux (CoreOS and Ubuntu) Agents](../examples/coreos/kubernetes-coreos-hybrid.json) | +| acceleratedNetworkingEnabled | no | Use [Azure Accelerated Networking](https://azure.microsoft.com/en-us/blog/maximize-your-vm-s-performance-with-accelerated-networking-now-generally-available-for-both-windows-and-linux/) feature for agents (You must select a VM SKU that support Accelerated Networking) | ### linuxProfile `linuxProfile` provides the linux configuration for each linux node in the cluster -|Name|Required|Description| -|---|---|---| -|adminUsername|yes|Describes the username to be used on all linux clusters| -|ssh.publicKeys.keyData|yes|The public SSH key used for authenticating access to all Linux nodes in the cluster. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation)| -|secrets|no|Specifies an array of key vaults to pull secrets from and what secrets to pull from each| -|customSearchDomain.name|no|describes the search domain to be used on all linux clusters| -|customSearchDomain.realmUser|no|describes the realm user with permissions to update dns registries on Windows Server DNS| -|customSearchDomain.realmPassword|no|describes the realm user password to update dns registries on Windows Server DNS| -|customNodesDNS.dnsServer|no|describes the IP address of the DNS Server| +| Name | Required | Description | +| -------------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| adminUsername | yes | Describes the username to be used on all linux clusters | +| ssh.publicKeys.keyData | yes | The public SSH key used for authenticating access to all Linux nodes in the cluster. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation) | +| secrets | no | Specifies an array of key vaults to pull secrets from and what secrets to pull from each | +| customSearchDomain.name | no | describes the search domain to be used on all linux clusters | +| customSearchDomain.realmUser | no | describes the realm user with permissions to update dns registries on Windows Server DNS | +| customSearchDomain.realmPassword | no | describes the realm user password to update dns registries on Windows Server DNS | +| customNodesDNS.dnsServer | no | describes the IP address of the DNS Server | #### secrets + `secrets` details which certificates to install on the masters and nodes in the cluster. A cluster can have a list of key vaults to install certs from. On linux boxes the certs are saved on under the directory "/var/lib/waagent/". 2 files are saved per certificate: -1. `{thumbprint}.crt` : this is the full cert chain saved in PEM format -2. `{thumbprint}.prv` : this is the private key saved in PEM format +1. 
`{thumbprint}.crt` : this is the full cert chain saved in PEM format +2. `{thumbprint}.prv` : this is the private key saved in PEM format + +| Name | Required | Description | +| -------------------------------- | -------- | ------------------------------------------------------------------- | +| sourceVault.id | yes | The azure resource manager id of the key vault to pull secrets from | +| vaultCertificates.certificateUrl | yes | Keyvault URL to this cert including the version | -|Name|Required|Description| -|---|---|---| -|sourceVault.id|yes|The azure resource manager id of the key vault to pull secrets from| -|vaultCertificates.certificateUrl|yes|Keyvault URL to this cert including the version| format for `sourceVault.id`, can be obtained in cli, or found in the portal: /subscriptions/{subscription-id}/resourceGroups/{resource-group}/providers/Microsoft.KeyVault/vaults/{keyvaultname} format for `vaultCertificates.certificateUrl`, can be obtained in cli, or found in the portal: @@ -518,90 +526,95 @@ https://{keyvaultname}.vault.azure.net:443/secrets/{secretName}/{version} ### servicePrincipalProfile -`servicePrincipalProfile` describes an Azure Service credentials to be used by the cluster for self-configuration. See [service principal](serviceprincipal.md) for more details on creation. - -|Name|Required|Description| -|---|---|---| -|clientId|yes, for Kubernetes clusters|describes the Azure client id. It is recommended to use a separate client ID per cluster| -|secret|yes, for Kubernetes clusters|describes the Azure client secret. It is recommended to use a separate client secret per client id| -|objectId|optional, for Kubernetes clusters|describes the Azure service principal object id. It is required if enableEncryptionWithExternalKms is true| -|keyvaultSecretRef.vaultId|no, for Kubernetes clusters|describes the vault id of the keyvault to retrieve the service principal secret from. See below for format.| -|keyvaultSecretRef.secretName|no, for Kubernetes clusters|describes the name of the service principal secret in keyvault| -|keyvaultSecretRef.version|no, for Kubernetes clusters|describes the version of the secret to use| +`servicePrincipalProfile` describes an Azure Service credentials to be used by the cluster for self-configuration. See [service principal](serviceprincipal.md) for more details on creation. +| Name | Required | Description | +| ---------------------------- | --------------------------------- | ----------------------------------------------------------------------------------------------------------- | +| clientId | yes, for Kubernetes clusters | describes the Azure client id. It is recommended to use a separate client ID per cluster | +| secret | yes, for Kubernetes clusters | describes the Azure client secret. It is recommended to use a separate client secret per client id | +| objectId | optional, for Kubernetes clusters | describes the Azure service principal object id. It is required if enableEncryptionWithExternalKms is true | +| keyvaultSecretRef.vaultId | no, for Kubernetes clusters | describes the vault id of the keyvault to retrieve the service principal secret from. See below for format. 
| +| keyvaultSecretRef.secretName | no, for Kubernetes clusters | describes the name of the service principal secret in keyvault | +| keyvaultSecretRef.version | no, for Kubernetes clusters | describes the version of the secret to use | format for `keyvaultSecretRef.vaultId`, can be obtained in cli, or found in the portal: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults/`. See [keyvault params](../examples/keyvault-params/README.md#service-principal-profile) for an example. ## Cluster Defintions for apiVersion "2016-03-30" -Here are the cluster definitions for apiVersion "2016-03-30". This matches the api version of the Azure Container Service Engine. +Here are the cluster definitions for apiVersion "2016-03-30". This matches the api version of the Azure Container Service Engine. ### apiVersion -|Name|Required|Description| -|---|---|---| -|apiVersion|yes|The version of the template. For "2016-03-30" the value is "2016-03-30"| +| Name | Required | Description | +| ---------- | -------- | ----------------------------------------------------------------------- | +| apiVersion | yes | The version of the template. For "2016-03-30" the value is "2016-03-30" | ### orchestratorProfile + `orchestratorProfile` describes the orchestrator settings. -|Name|Required|Description| -|---|---|---| -|orchestratorType|yes|Specifies the orchestrator type for the cluster| +| Name | Required | Description | +| ---------------- | -------- | ----------------------------------------------- | +| orchestratorType | yes | Specifies the orchestrator type for the cluster | Here are the valid values for the orchestrator types: -1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). -2. `Swarm` - this represents the [Swarm orchestrator](swarm.md). -3. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). -4. `Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). -5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md) +1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). +2. `Swarm` - this represents the [Swarm orchestrator](swarm.md). +3. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). +4. `Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). +5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md) ### masterProfile + `masterProfile` describes the settings for master configuration. -|Name|Required|Description| -|---|---|---| -|count|yes|Masters have count value of 1, 3, or 5 masters| -|dnsPrefix|yes|The dns prefix for the masters FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet))| +| Name | Required | Description | +| --------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| count | yes | Masters have count value of 1, 3, or 5 masters | +| dnsPrefix | yes | The dns prefix for the masters FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet)) | ### agentPoolProfiles + For apiVersion "2016-03-30", a cluster may have only 1 agent pool profiles. 
-|Name|Required|Description| -|---|---|---| -|count|yes|Describes the node count| -|dnsPrefix|required if agents are to be exposed publically with a load balancer|The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools| -|name|yes|The unique name for the agent pool profile. The resources of the agent pool profile are derived from this name| -|vmsize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores| +| Name | Required | Description | +| --------- | -------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| count | yes | Describes the node count | +| dnsPrefix | required if agents are to be exposed publically with a load balancer | The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools | +| name | yes | The unique name for the agent pool profile. The resources of the agent pool profile are derived from this name | +| vmsize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores | ### linuxProfile `linuxProfile` provides the linux configuration for each linux node in the cluster -|Name|Required|Description| -|---|---|---| -|adminUsername|yes|Describes the username to be used on all linux clusters| -|ssh.publicKeys[0].keyData|yes|The public SSH key used for authenticating access to all Linux nodes in the cluster. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation)| +| Name | Required | Description | +| ------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| adminUsername | yes | Describes the username to be used on all linux clusters | +| ssh.publicKeys[0].keyData | yes | The public SSH key used for authenticating access to all Linux nodes in the cluster. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation) | + ### aadProfile `aadProfile` provides [Azure Active Directory integration](kubernetes.aad.md) configuration for the cluster, currently only available for Kubernetes orchestrator. -|Name|Required|Description| -|---|---|---| -|clientAppID|yes|Describes the client AAD application ID| -|serverAppID|yes|Describes the server AAD application ID| -|adminGroupID|no|Describes the AAD Group Object ID that will be assigned the cluster-admin RBAC role| -|tenantID|no|Describes the AAD tenant ID to use for authentication. 
If not specified, will use the tenant of the deployment subscription| +| Name | Required | Description | +| ------------ | -------- | --------------------------------------------------------------------------------------------------------------------------- | +| clientAppID | yes | Describes the client AAD application ID | +| serverAppID | yes | Describes the server AAD application ID | +| adminGroupID | no | Describes the AAD Group Object ID that will be assigned the cluster-admin RBAC role | +| tenantID | no | Describes the AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription | + ### extensionProfiles -A cluster can have 0 - N extensions in extension profiles. Extension profiles allow a user to easily add pre-packaged functionality into a cluster. An example would be configuring a monitoring solution on your cluster. You can think of extensions like a marketplace for acs clusters. - -|Name|Required|Description| -|---|---|---| -|name|yes|The name of the extension. This has to exactly match the name of a folder under the extensions folder| -|version|yes|The version of the extension. This has to exactly match the name of the folder under the extension name folder| -|extensionParameters|optional|Extension parameters may be required by extensions. The format of the parameters is also extension dependant| -|rootURL|optional|URL to the root location of extensions. The rootURL must have an extensions child folder that follows the extensions convention. The rootURL is mainly used for testing purposes| + +A cluster can have 0 - N extensions in extension profiles. Extension profiles allow a user to easily add pre-packaged functionality into a cluster. An example would be configuring a monitoring solution on your cluster. You can think of extensions like a marketplace for acs clusters. + +| Name | Required | Description | +| ------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| name | yes | The name of the extension. This has to exactly match the name of a folder under the extensions folder | +| version | yes | The version of the extension. This has to exactly match the name of the folder under the extension name folder | +| extensionParameters | optional | Extension parameters may be required by extensions. The format of the parameters is also extension dependant | +| rootURL | optional | URL to the root location of extensions. The rootURL must have an extensions child folder that follows the extensions convention. The rootURL is mainly used for testing purposes | You can find more information, as well as a list of extensions on the [extensions documentation](extensions.md). diff --git a/examples/addons/cluster-autoscaler/README.md b/examples/addons/cluster-autoscaler/README.md index 5827da4197..507eaf905b 100644 --- a/examples/addons/cluster-autoscaler/README.md +++ b/examples/addons/cluster-autoscaler/README.md @@ -2,8 +2,8 @@ [Cluster Autoscaler](https://github.com/kubernetes/autoscaler) is a tool that automatically adjusts the size of the Kubernetes cluster when: -* there are pods that failed to run in the cluster due to insufficient resources. -* some nodes in the cluster are so underutilized, for an extended period of time, that they can be deleted and their pods will be easily placed on some other, existing nodes. 
+- there are pods that failed to run in the cluster due to insufficient resources. +- some nodes in the cluster are so underutilized, for an extended period of time, that they can be deleted and their pods will be easily placed on some other, existing nodes. This is the Kubernetes Cluster Autoscaler add-on for Virtual Machine Scale Sets. Add this add-on to your json file as shown below to automatically enable cluster autoscaler in your new Kubernetes cluster. @@ -85,4 +85,4 @@ Follow the README at https://github.com/kubernetes/autoscaler/tree/master/cluste ## Supported Orchestrators -* Kubernetes +- Kubernetes diff --git a/pkg/acsengine/k8s_versions.go b/pkg/acsengine/k8s_versions.go index b917303826..f889136508 100644 --- a/pkg/acsengine/k8s_versions.go +++ b/pkg/acsengine/k8s_versions.go @@ -24,6 +24,7 @@ var k8sComponentVersions = map[string]map[string]string{ "aci-connector": "virtual-kubelet:latest", ContainerMonitoringAddonName: "oms:ciprod05082018", AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4", + "cluster-autoscaler": "cluster-autoscaler:v1.3.0", "nodestatusfreq": DefaultKubernetesNodeStatusUpdateFrequency, "nodegraceperiod": DefaultKubernetesCtrlMgrNodeMonitorGracePeriod, "podeviction": DefaultKubernetesCtrlMgrPodEvictionTimeout, @@ -263,6 +264,7 @@ func getK8sVersionComponents(version string, overrides map[string]string) map[st DefaultACIConnectorAddonName: k8sComponentVersions["1.11"]["aci-connector"], ContainerMonitoringAddonName: k8sComponentVersions["1.11"][ContainerMonitoringAddonName], AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.11"][AzureCNINetworkMonitoringAddonName], + DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.11"]["cluster-autoscaler"], "nodestatusfreq": k8sComponentVersions["1.11"]["nodestatusfreq"], "nodegraceperiod": k8sComponentVersions["1.11"]["nodegraceperiod"], "podeviction": k8sComponentVersions["1.11"]["podeviction"], diff --git a/pkg/acsengine/k8s_versions_test.go b/pkg/acsengine/k8s_versions_test.go index a9038d0da7..c4821ee55d 100644 --- a/pkg/acsengine/k8s_versions_test.go +++ b/pkg/acsengine/k8s_versions_test.go @@ -29,6 +29,7 @@ func TestGetK8sVersionComponents(t *testing.T) { DefaultACIConnectorAddonName: k8sComponentVersions["1.11"]["aci-connector"], ContainerMonitoringAddonName: k8sComponentVersions["1.11"][ContainerMonitoringAddonName], AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.11"][AzureCNINetworkMonitoringAddonName], + DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.11"]["cluster-autoscaler"], "nodestatusfreq": k8sComponentVersions["1.11"]["nodestatusfreq"], "nodegraceperiod": k8sComponentVersions["1.11"]["nodegraceperiod"], "podeviction": k8sComponentVersions["1.11"]["podeviction"], From aafdbd1c5e84284ec80b64d7f76ed2b3776872c6 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Fri, 22 Jun 2018 09:32:16 -0700 Subject: [PATCH 15/74] updating the agent image to use the latest version for testing (#3336) --- pkg/acsengine/defaults.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index 8130d3369f..3df0548054 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -326,7 +326,7 @@ var ( Containers: []api.KubernetesContainerSpec{ { Name: "omsagent", - Image: "microsoft/oms:ciprod06072018", + Image: "microsoft/oms:June21st", CPURequests: "50m", MemoryRequests: "100Mi", CPULimits: "150m", From ee9f711696c1f94ba9851790e916b6b6a29a804d Mon Sep 17 00:00:00 2001 From: Michalis 
Kargakis Date: Fri, 22 Jun 2018 18:59:19 +0200 Subject: [PATCH 16/74] Use oreg_url from file if present (#3226) --- parts/openshift/release-3.9/openshiftmasterscript.sh | 6 ++++++ parts/openshift/unstable/openshiftmasterscript.sh | 6 ++++++ pkg/openshift/certgen/release39/templates/bindata.go | 4 ++-- .../templates/master/etc/origin/master/master-config.yaml | 2 +- .../master/tmp/ansible/azure-local-master-inventory.yml | 2 +- pkg/openshift/certgen/unstable/templates/bindata.go | 4 ++-- .../templates/master/etc/origin/master/master-config.yaml | 2 +- .../master/tmp/ansible/azure-local-master-inventory.yml | 2 +- 8 files changed, 20 insertions(+), 8 deletions(-) diff --git a/parts/openshift/release-3.9/openshiftmasterscript.sh b/parts/openshift/release-3.9/openshiftmasterscript.sh index fcf0038c03..679d561e79 100644 --- a/parts/openshift/release-3.9/openshiftmasterscript.sh +++ b/parts/openshift/release-3.9/openshiftmasterscript.sh @@ -47,6 +47,11 @@ sed -i -e "s#--loglevel=2#--loglevel=4#" /etc/sysconfig/${SERVICE_TYPE}-master-c rm -rf /etc/etcd/* /etc/origin/master/* /etc/origin/node/* +MASTER_OREG_URL="$IMAGE_PREFIX/$IMAGE_TYPE" +if [[ -f /etc/origin/oreg_url ]]; then + MASTER_OREG_URL=$(cat /etc/origin/oreg_url) +fi + oc adm create-bootstrap-policy-file --filename=/etc/origin/master/policy.json ( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) @@ -94,6 +99,7 @@ for i in /etc/origin/master/master-config.yaml /tmp/bootstrapconfigs/* /tmp/ansi sed -i "s|COCKPIT_VERSION|${COCKPIT_VERSION}|g; s|COCKPIT_BASENAME|${COCKPIT_BASENAME}|g; s|COCKPIT_PREFIX|${COCKPIT_PREFIX}|g;" $i sed -i "s|VERSION|${VERSION}|g; s|SHORT_VER|${VERSION%.*}|g; s|SERVICE_TYPE|${SERVICE_TYPE}|g; s|IMAGE_TYPE|${IMAGE_TYPE}|g" $i sed -i "s|HOSTNAME|${HOSTNAME}|g;" $i + sed -i "s|MASTER_OREG_URL|${MASTER_OREG_URL}|g" $i done # note: ${SERVICE_TYPE}-node crash loops until master is up diff --git a/parts/openshift/unstable/openshiftmasterscript.sh b/parts/openshift/unstable/openshiftmasterscript.sh index 9d7d3a4fec..7b4ac06deb 100644 --- a/parts/openshift/unstable/openshiftmasterscript.sh +++ b/parts/openshift/unstable/openshiftmasterscript.sh @@ -119,12 +119,18 @@ else sed -i "s|PROMETHEUS_EXPORTER_VERSION|${PROMETHEUS_EXPORTER_VERSION}|g;" /tmp/ansible/azure-local-master-inventory.yml fi +MASTER_OREG_URL="$IMAGE_PREFIX/$IMAGE_TYPE" +if [[ -f /etc/origin/oreg_url ]]; then + MASTER_OREG_URL=$(cat /etc/origin/oreg_url) +fi + for i in /etc/origin/master/master-config.yaml /tmp/bootstrapconfigs/* /tmp/ansible/azure-local-master-inventory.yml; do sed -i "s/TEMPROUTERIP/${routerLBIP}/; s|IMAGE_PREFIX|$IMAGE_PREFIX|g; s|ANSIBLE_DEPLOY_TYPE|$ANSIBLE_DEPLOY_TYPE|g" $i sed -i "s|REGISTRY_STORAGE_AZURE_ACCOUNTNAME|${REGISTRY_STORAGE_AZURE_ACCOUNTNAME}|g; s|REGISTRY_STORAGE_AZURE_ACCOUNTKEY|${REGISTRY_STORAGE_AZURE_ACCOUNTKEY}|g" $i sed -i "s|COCKPIT_VERSION|${COCKPIT_VERSION}|g; s|COCKPIT_BASENAME|${COCKPIT_BASENAME}|g; s|COCKPIT_PREFIX|${COCKPIT_PREFIX}|g;" $i sed -i "s|VERSION|${VERSION}|g; s|SHORT_VER|${VERSION%.*}|g; s|SERVICE_TYPE|${SERVICE_TYPE}|g; s|IMAGE_TYPE|${IMAGE_TYPE}|g" $i sed -i "s|HOSTNAME|${HOSTNAME}|g;" $i + sed -i "s|MASTER_OREG_URL|${MASTER_OREG_URL}|g" $i done mkdir -p /root/.kube diff --git a/pkg/openshift/certgen/release39/templates/bindata.go b/pkg/openshift/certgen/release39/templates/bindata.go index 184ddf2182..c77725467b 100644 --- a/pkg/openshift/certgen/release39/templates/bindata.go +++ b/pkg/openshift/certgen/release39/templates/bindata.go @@ -119,7 +119,7 @@ func masterEtcOriginMasterHtpasswd() 
(*asset, error) { return a, nil } -var _masterEtcOriginMasterMasterConfigYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x79\x6f\x1b\xb9\x15\xff\x5f\x9f\x82\x08\x16\x48\x52\x74\x66\x24\x3b\xe7\x00\x45\xa1\xda\xce\x46\x58\x3b\x51\x65\xa7\xd8\xa2\x2e\x16\x14\xf9\x34\x62\xc4\x21\x27\x3c\x14\x2b\x6e\xbe\x7b\xc1\x63\x4e\x49\x49\x9a\x4d\xb1\xde\x45\xe2\xe1\x3b\xf8\xf8\xde\x8f\xef\x60\x30\x2d\x99\xd6\x4c\x8a\x33\x29\x56\xac\xc8\x47\x08\x55\xdc\x16\xac\xf3\x8d\xd0\xdf\x2c\xe3\xf4\x1c\x56\xd8\x72\xa3\xc3\x12\x42\xc4\x33\x58\x85\x0d\x93\xa2\x5e\x44\x08\x57\xec\x1f\xa0\x9c\xc6\x1c\x6d\x27\xcd\x32\x88\x6d\x8e\xfe\xf5\xef\xe6\x7b\xc3\x04\xcd\xfb\x8a\xc3\x8e\x0d\x87\x02\x2d\xad\x22\xa0\x5b\xdd\x08\x71\x56\x32\xa3\x73\x74\xff\xb9\xb3\xa8\xe0\x83\x05\xdd\x59\xf6\x6a\xdf\x6e\x41\x29\x46\xe1\x3b\x0d\xee\x18\xd8\x68\xea\x58\x38\x97\x74\xae\x40\x83\xf9\x3e\xed\x94\x69\xbc\xe4\x90\xa3\x15\xe6\x1a\x06\x9b\x46\x87\x4c\xfb\xa1\xf1\x4c\xb2\x02\xa1\xd7\x6c\x65\x52\x26\xb3\x59\x89\x0b\x98\x4b\xce\xc8\xee\x3b\x83\x72\x07\xc4\x3a\xce\x85\xe5\x5d\x3f\x27\xa8\xc4\x86\xac\xbd\xfe\xa9\x10\xd2\x78\x75\xbd\x40\x24\x68\x03\xbb\x1c\x31\xc7\xa2\xd3\x9e\x59\x14\xc4\x2e\x69\x54\x77\x64\x10\xda\x62\x6e\x21\x47\x0f\x8d\xb2\xf0\xb0\x43\x11\xb8\x84\xbc\x35\x27\xa1\x20\x18\xd0\x0e\x83\x14\x8b\x43\x70\x48\x1a\x94\xe4\xa8\x92\x54\x1f\x21\x2d\x5d\x14\x75\x0f\x31\xef\x81\x98\x1c\x39\x3b\x3a\xcb\x7a\xc3\xaa\xb7\x7e\x27\xee\xed\x78\x85\x19\xb7\x0a\x06\x7c\x21\x48\x1d\xe7\xc7\xf8\xe0\xa2\x50\x50\x60\x23\x55\xe7\x2e\x29\x79\xb7\x3b\xe3\x0c\x84\x99\x89\x95\x0c\xb6\x13\x50\xe6\x15\x73\xd1\x6f\x45\x92\x95\x92\xc2\x24\x9e\x3f\x25\xca\x78\xc6\x0d\xec\xbe\xc8\xb7\x81\xdd\x08\x57\xec\x12\xb6\xc0\x75\x3e\x4a\x5c\x6c\x07\xa1\xc6\xd6\xac\x5b\x73\xe2\x4d\x79\x0d\x98\x82\x8a\xc6\x78\xe3\xce\xa6\x39\xea\x68\x4e\x08\x6e\x8c\x88\x0c\xb2\x2c\xa5\x78\x83\xcb\x3a\x00\xc9\x11\xa3\x46\x01\x58\x46\xe1\xb0\xcb\x5c\xc1\x8a\xdd\xb5\x52\xbf\x26\x0b\x28\xa5\x81\xe4\xc2\xf1\x24\x7e\xb5\x50\xd2\x56\x81\x7d\x9f\xef\x67\x47\xf4\x8b\x56\x83\x72\x48\x39\xc6\xf9\x4e\x83\x1a\x11\x29\x8c\x92\x9c\x43\x27\x0a\xc0\x81\xb4\x17\x82\x4b\xb2\x79\xe3\x01\xd7\xc0\x36\x29\xb1\x36\xa0\x92\x56\xd8\xa1\x45\x83\xda\x32\x02\xd7\xee\x2f\x51\x9c\x81\x8a\x97\x5d\xb3\x42\xd4\xee\xeb\x46\x33\xf2\x27\x81\xde\x38\xb0\x13\xc7\x01\x87\x8b\x5f\x67\xcb\x1c\x3d\xfc\xd3\xc3\x11\x91\x4a\x4f\x39\x97\x1f\x81\xbe\x55\xac\x60\xc2\x47\xf6\xd1\x5f\xd9\xe3\x2c\x9b\x9c\x3c\xbf\x4d\xc7\xfe\xff\xc9\xa3\xfc\x3f\xb7\x9f\x1e\x37\x24\x2e\x09\xe6\x6b\xa9\xcd\x60\xfd\xfe\x1e\xfd\xdd\x4a\x03\x57\x60\x30\x7a\xc4\x04\x85\x3b\x94\x5e\xf9\xe3\xa6\xb3\xb9\x46\xe3\xc7\xe9\xb5\x51\x4c\x14\xe8\xf3\xe7\x81\xe8\xc6\x2e\x41\x09\x30\xa0\x6f\x53\x1a\x72\xd2\xd7\x39\x6e\x53\xbd\x25\xb7\x29\xe1\xd6\x6d\x71\x9b\x7a\xbb\x8e\x8a\x7d\xc9\xd8\xf4\xe2\xce\xb8\x80\xf3\x60\xed\x6b\xa9\x8d\x8b\xfe\xbe\x9d\x4d\x18\x8f\x99\xd9\x57\x1b\x0f\xff\xbf\xe8\xf3\x87\xfa\xc6\xb3\x0f\xd8\x26\xcf\x4f\x6e\xd3\xd3\xc3\x31\x3b\xb2\xd1\x57\xbc\xd7\x48\xc5\x75\x2a\x74\x0b\xf5\x25\x13\x74\x4a\xa9\x02\xad\x73\x34\x4e\xfd\x7f\xf9\x8b\xf1\xd3\xd3\x48\x7b\x03\xe6\xa3\x54\x9b\x1c\x19\x52\x3d\x19\x81\x21\xb4\x9f\x9d\x08\xce\x51\xb8\x0c\xa9\x23\xb6\x89\xa0\x85\x79\x8f\xec\x85\x23\x4b\x03\xf3\x03\x1c\x0e\xea\x08\x59\xc5\xfd\xb5\x4d\xd0\xda\x98\x4a\xe7\x3e\x34\x07\x02\x92\x9f\x9c\x3e\x7f\xe9\xad\xbb\x36\x52\xe1\x02\xda\x03\xb6\x6e\x8f\xa4\x90\x60\xf2\x0e\x21\x65\xf2\x10\x63\xbf\x02\x3a\x2f\x5e\x3b\x2f\x0e\xd4\x74\x4b\xd9\x01\xb6\xae\x12\x5f\xfc\x5a\xcb\x56\x52\x95\xd8\xe4\x68\x76\x35\xfd\xf9\xe2\xb7\xf9\xe2\xe2\xd5\xec\xd7\x2c\x7c\xdc\xfc\x73\x7e\x91\xfc\x7
4\x4f\x64\x59\x49\x01\xc2\x7c\xce\x7f\xba\xdf\x06\x4d\xae\x63\xe1\xd8\x80\x36\x75\x33\xc0\x86\x95\xc5\x29\x67\x22\xdc\x85\x05\x14\x4c\x1b\xb5\xab\x9d\x95\x23\x2a\xc9\x06\x54\xa2\x22\xa1\x46\x92\x03\x52\xfe\x74\x3c\x1e\x8f\x42\xbd\x0a\x4e\x8e\xa5\xca\xf9\x86\x83\xd9\x0f\x3d\xc1\xc9\xd2\x0a\xca\xe1\x58\xd4\xa3\xe4\x97\x03\x3f\x60\x0a\xb1\xaf\xa4\x32\x39\x9a\x8c\x4f\x9e\x8e\x47\x6d\x6c\xba\x66\x39\x23\x70\xc5\x5c\xbe\x05\x35\x55\x85\x2d\x41\xd4\xfd\xa6\xb2\xc2\xb0\x12\x12\xd2\x69\x4b\x13\xc7\xad\x33\x0d\xc6\x30\x51\xe8\x74\xf3\xc2\x85\x3e\xdb\x4e\x30\xaf\xd6\x78\xf2\x97\xa6\x6a\xeb\x10\xbb\x64\x89\xc9\x06\x04\xad\xa5\x1d\xbe\x4e\x7b\x0c\x25\x50\x86\x13\xb3\xab\xa0\xdd\xa1\xe2\x8c\xf8\xfe\x27\xdb\x0a\x9a\x76\x50\x56\x29\x69\xe4\xd2\xae\x62\x95\x94\x96\xba\x0a\xb8\x65\x4d\x69\x4d\xd0\x03\xfc\xc9\x2a\x78\xd0\xe1\xe8\xdb\xff\x20\x03\x43\x32\xcf\x14\xfe\x4c\x1d\xdd\xf1\xb7\x65\x61\xe0\x88\x98\x1a\x7c\x09\x61\xa2\x48\x5c\x84\x92\x95\xf3\x7e\x4f\xa7\xf4\xa5\x23\x0b\x01\xc9\xc2\x2d\x7e\x70\x50\xc1\x06\x76\xdf\x22\xbf\x81\xdd\x83\xff\xcb\x49\xcb\x88\x00\x2b\x1c\x3c\x9a\x85\xd9\x3c\x47\xf7\xf7\x5f\xab\x58\x1e\x57\xf4\x62\xcb\x7c\x7d\xbf\x61\x25\x48\x6b\x72\x24\x2c\xe7\x5f\xef\xbe\x22\x5a\x63\xc7\xd3\x05\xf4\x3e\xa4\x7b\x4c\x01\xd0\x9a\xac\x81\xda\x5e\x84\xea\x8d\x1b\x52\x00\x76\xd0\x74\xc0\xaf\x0d\x5f\xfa\x5e\xfb\x56\x39\x36\x08\xfa\x8d\xa4\x30\x97\xca\x2c\xb0\x28\x5c\xc3\xfc\xb0\x43\xbb\xb6\x4b\x01\xce\x57\xcf\x4f\xd2\x53\x9f\xe0\xb3\xc9\x33\x47\x77\x6d\x3a\x71\x92\xa1\x4d\x73\xe3\x56\x74\xae\x37\xdb\x03\x08\x62\x4d\xfd\xa5\xc1\xf1\x59\xec\xef\x84\x08\x4d\xd2\x60\x6e\xc2\x84\x40\xe5\xc8\x06\x84\xb9\xd9\x55\x4e\xf1\x37\x5c\x8a\x3f\x77\x79\xe2\xe1\x10\x5a\x5a\xe5\xf2\xdc\x93\xf1\x78\x14\xa7\x95\x5a\xeb\x37\x29\xf5\x42\x1f\x2a\x9d\xa3\x13\xaf\x61\xff\x30\xee\xb7\x98\x4c\x82\xd3\x9a\x74\x7e\x29\x65\xe5\xee\xff\x1f\x70\xdc\x67\xbf\xfb\xb8\xa7\x5e\xc3\xde\x59\xba\xa7\x1d\xf6\xb2\x5e\x61\xb8\x83\x11\x05\x73\xbb\xe4\x8c\xbc\x5b\x5c\xe6\xbd\xea\x7b\xb4\xcb\xca\x3b\xb5\xd9\x61\xd1\x5d\x37\x11\xda\x87\x36\x5f\xc7\x6c\x12\xdb\x8a\xb3\xd9\xf9\xc2\xe5\xf8\x74\x72\xf2\x22\x00\xf3\xc9\x1e\x4f\x6c\x00\x08\xa3\x6a\x9f\x15\x21\xd7\xc2\x06\x84\x5f\x82\x28\xcc\x3a\x47\x2f\x3b\x91\x9e\xcd\x3b\x3b\x45\x4d\xb1\xcb\xc9\x9c\x8b\x0e\x4b\x47\xab\xe7\xfe\x61\x23\xb4\xfe\x0a\xe8\x1a\x9b\xb6\x97\x4a\xe4\x56\x27\xda\x4b\xb6\x57\xad\x7f\xaa\xde\x75\x93\xfd\xc1\x0a\x6b\x0d\xe6\x07\x38\x38\x23\x52\x68\xc9\x21\x1b\xb9\xa9\x08\x7b\xa0\x36\x59\xb4\x04\xb3\x96\x34\x47\xd8\x1a\xd7\x9a\x30\x0a\xc2\x30\xb3\x9b\xc7\x54\xac\xf3\xd1\xfd\x7d\x82\xd8\x0a\xa5\x17\x02\x2f\x39\x4c\xa7\xe7\x53\x6b\xd6\x8e\x2b\x00\xcd\xe7\xcb\x24\x4e\xdb\x53\x97\x85\xd1\xf4\x3c\x40\x73\x8d\x39\x07\x9f\x6b\xda\x17\x09\x2e\x0b\x26\x3a\xc3\x6f\x89\xab\x8a\x89\xe2\x2a\x9a\x41\x38\x66\xa5\x27\xf4\x8b\xc1\x91\xe7\x86\xd0\x86\xbc\xad\x40\xcc\xce\x67\x03\xd3\xeb\x51\x2a\xa4\xea\x73\x9f\xf9\x53\x6f\x60\x38\x7f\x3a\x9d\x9e\xc7\x3c\x7e\x1e\xb2\x7e\xcb\x7e\x0d\x44\xb9\x74\x78\x54\x24\x30\x74\xc5\x30\x2b\x3b\xcf\x08\x8c\x76\x5f\x3e\xb4\x5d\x36\x5f\x95\x82\x15\x28\x05\xf4\x5d\x9c\x3c\xbb\x8c\x56\xb0\x0f\x16\x7e\x73\xcb\xcd\xea\x90\xa7\x47\x84\x12\x33\xde\xa5\xfa\x85\xf8\x5d\x77\xc6\xd1\x81\xd6\xac\xa5\x62\x9f\xa0\x45\x92\x0f\x46\x5a\x32\xa2\xa4\x96\x2b\x23\x05\x67\xc2\x15\xd1\x32\x1b\x1e\xfc\x06\x04\x8e\x8e\xca\x3c\x4c\x4f\xb2\x46\x5f\xb3\x83\x91\x1b\x10\x3f\x48\xbb\xd7\xe5\xb1\x07\x82\xf6\x30\x76\xe9\xc6\x17\x54\x61\xad\x3f\x4a\x45\x87\x48\x6b\x80\xf5\x63\x81\xb6\x3a\x56\x6d\xd7\xc6\x5b\x42\x7b\x80\x7c\x7d\x33\xf7\x8b\xf3\x68\xe4\x01\x68\xc6\x22\x3a\xdd\x6f\x8f\x7f\x5c\x62\xad\x75\x
fd\x5e\x2d\x1a\x06\xaf\xb9\xcd\xd2\x15\xbe\x9b\x16\x70\xed\x6a\x02\x75\x25\xa5\xae\x4a\x91\x1c\xd2\xa2\xd6\xa2\xbb\x18\xae\x8e\x3e\xde\xbf\x04\xb6\x44\x07\xbe\x74\x87\x4b\x07\x68\x0f\x88\xae\x09\xae\xaa\x6a\x7d\xe3\x96\x07\x66\xbc\x78\x56\x37\x03\x0d\x46\x0f\xb1\x3d\x1d\x8f\x47\x15\xb6\xda\xa1\xb0\x7d\x2e\x09\xa9\xaa\x1a\x8c\x4a\x4b\x29\x8d\x36\x0a\x57\x61\x86\x3a\x6a\x7c\x90\xab\x3b\xaf\xa6\x12\xcc\xc4\x4a\x61\x6d\x94\x25\xc6\xaa\xd0\x4a\x55\x98\xf4\x5e\x8b\x98\x63\xe9\xca\x5c\xaf\xb1\x02\xda\x3c\x51\x1e\x12\x1a\x55\x4a\xbe\x07\xd2\x49\xe8\x71\x50\x73\x0d\xdb\xb5\x7f\xa1\x92\x2a\x47\x42\x52\x48\x94\xe4\x90\xf6\xe6\xd9\xcc\x8d\x8e\xd6\x40\x3d\xd3\x44\x65\x8b\xf0\x9e\x77\x05\x5a\xe3\xa6\x4f\xec\xd3\x6e\xa0\xac\xdc\x7c\xd9\x34\x91\xc4\x2a\x66\x76\x53\xce\x25\xc1\x6e\xcb\x70\xe3\x88\x6e\x56\x62\xcf\xa9\xc7\x79\x76\x52\x13\x2f\xf1\x12\xb8\x9e\x83\x9a\x07\xe5\x39\x7a\x1a\x9e\xe4\x18\x1d\xca\x4d\xc6\xf5\x4f\x32\x79\x59\xff\x64\x7e\x75\xa4\xa4\x75\xa3\x5a\xeb\x03\x6d\x97\x54\x96\xd8\xdd\xfe\x9b\x8b\xab\xf9\xe2\xed\xbb\x9b\x8b\xc5\x6c\x9e\x0a\x56\xb9\xf9\x3b\xd6\xe1\x29\x21\x6e\x3c\x68\xc5\xfc\x3f\x03\x04\x70\x2e\x5c\x8a\x06\x41\x40\xb7\xa5\xab\xc4\x02\x17\x40\x9b\xd7\xca\xa4\xf6\xb5\xff\xdd\xbf\x06\xfb\x8b\xed\xd6\x2b\x2e\x77\x5f\xb9\xe5\x95\x62\x5b\x6c\xe0\x97\xc1\x2b\x1e\x0e\x56\xb9\x7e\xcd\xd3\xeb\x39\xd7\x67\x83\xc8\x1c\xb7\xdf\x93\xf0\x3c\x5e\x40\x87\xd7\xc5\x7a\x3e\x39\xf8\x7e\x73\xf0\xa6\xef\xbf\xe6\x1c\x98\x6d\xb4\x9f\xa5\xeb\x61\xbe\x79\xea\x6d\x5e\x75\x86\x63\x4e\xe4\x0f\x27\x29\xf1\x5d\x04\x91\x9e\x89\x57\x9c\x15\x6b\x13\x6e\x62\xf3\x90\x1c\x07\xae\x7e\x52\xd9\x4a\x6e\xcb\xce\xbb\x08\xdd\x09\x5c\x32\xe2\x13\xaa\xcb\x16\x4c\x14\xa1\x3f\xa1\x31\xe5\xff\x37\x00\x00\xff\xff\x96\xdf\x75\xfd\x84\x1a\x00\x00") +var _masterEtcOriginMasterMasterConfigYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x58\x79\x6f\x1b\xbb\x11\xff\x5f\x9f\x82\x08\x1e\x90\xa4\xe8\xae\x24\x3b\xe7\x02\x45\xa1\xda\xce\x8b\xf0\xec\x58\x95\x9c\xa2\x40\x5d\x04\x14\x39\x5a\x31\xe2\x92\x1b\x1e\x8a\x15\x37\xdf\xbd\xe0\xb1\xa7\xa4\x38\xcd\x4b\xf1\x9c\x20\xf1\x92\x33\xc3\xe1\xcc\x8f\x73\x61\x5a\x30\xad\x99\x14\x67\x52\xac\x58\x9e\x0d\x10\x2a\xb9\xcd\x59\xeb\x1b\xa1\xbf\x59\xc6\xe9\x39\xac\xb0\xe5\x46\x87\x25\x84\x88\x27\xb0\x0a\x1b\x26\x45\xb5\x88\x10\x2e\xd9\x3f\x40\x39\x89\x19\xda\x8e\xeb\x65\x10\xdb\x0c\xfd\xeb\xdf\xf5\xf7\x86\x09\x9a\x75\x05\x87\x13\x6b\x0a\x05\x5a\x5a\x45\x40\x37\xb2\x11\xe2\xac\x60\x46\x67\xe8\xfe\x6b\x6b\x51\xc1\x27\x0b\xba\xb5\xec\xc5\x5e\x6f\x41\x29\x46\xe1\x07\x15\x6e\x29\x58\x4b\x6a\x69\x38\x93\x74\xa6\x40\x83\xf9\x31\xe9\x94\x69\xbc\xe4\x90\xa1\x15\xe6\x1a\x7a\x87\x46\x83\x4c\xba\xae\xf1\x44\xb2\x04\xa1\xd7\x6c\x65\x52\x26\x87\xd3\x02\xe7\x30\x93\x9c\x91\xdd\x0f\x3a\xe5\x0e\x88\x75\x94\x73\xcb\xdb\x76\x4e\x50\x81\x0d\x59\x7b\xf9\x13\x21\xa4\xf1\xe2\x3a\x8e\x48\xd0\x06\x76\x19\x62\x8e\x44\xa7\x1d\xb5\x28\x88\x5d\x52\x8b\x6e\xf1\x20\xb4\xc5\xdc\x42\x86\x1e\x1b\x65\xe1\x71\x6b\x47\xe0\x02\xb2\x46\x9d\x84\x82\x60\x40\x5b\x04\x52\xcc\x0f\xc1\x21\xa9\x51\x92\xa1\x52\x52\x7d\x64\x6b\xe9\xbc\xa8\x3b\x88\xf9\x08\xc4\x64\xc8\xe9\xd1\x5a\xd6\x1b\x56\x5e\xfb\x93\xb8\xd7\xe3\x0d\x66\xdc\x2a\xe8\xd1\x05\x27\xb5\x8c\x1f\xfd\x83\xf3\x5c\x41\x8e\x8d\x54\xad\xb7\xa4\xe4\xdd\xee\x8c\x33\x10\x66\x2a\x56\x32\xe8\x4e\x40\x99\x37\xcc\x79\xbf\x61\x49\x56\x4a\x0a\x93\x78\xfa\x94\x28\xe3\x09\x37\xb0\xfb\x26\xdd\x06\x76\x03\x5c\xb2\x4b\xd8\x02\xd7\xd9\x20\x71\xbe\xed\xb9\x1a\x5b\xb3\x6e\xd4\x89\x2f\xe5\x2d\x60\x0a\x2a\x2a\xe3\x95\x3b\x9b\x64\xa8\x25\x39\x21\xb8\x56\x22\x12\xc8\xa2\x90\xe2\x1d\x2e\x2a\x07\x24\x47\x94\x1a\x04\x60\x19\x85\xc3\x29\x33\x05\x2b\x76\xd7\x70\xfd\x33\x99\x43\x21\x0d\x24\x17\x8e\x26\xf1\xab\xb9\x92\xb6\x0c\xe4\xfb\x74\xbf\xba\x4d\xbf\x68\x35\x28\x87\x94\x63\x94\xef\x35\xa8\x01\x91\xc2\x28\xc9\x39\xb4\xbc\x00\x1c\x48\xf3\x20\xb8\x24\x9b\x77\x1e\x70\x35\x6c\x93\x02\x6b\x03\x2a\x69\x98\x1d\x5a\x34\xa8\x2d\x23\xb0\x70\xff\x89\xfc\x0c\x54\x7c\xec\x9a\xe5\xa2\x32\x5f\xdb\x9b\x91\x3e\x09\xfb\xb5\x01\x5b\x7e\xec\x51\x38\xff\xb5\x8e\xcc\xd0\xe3\x3f\x3d\x1e\x10\xa9\xf4\x84\x73\xf9\x19\xe8\xb5\x62\x39\x13\xde\xb3\x4f\xfe\xca\x9e\x0e\x87\xe3\x93\x97\xb7\xe9\xc8\xff\x1d\x3f\xc9\xfe\x73\xfb\xe5\x69\xbd\xc5\x25\xc1\x7c\x2d\xb5\xe9\xad\xdf\xdf\xa3\xbf\x5b\x69\xe0\x0a\x0c\x46\x4f\x98\xa0\x70\x87\xd2\x2b\x7f\xdd\x74\x3a\xd3\x68\xf4\x34\x5d\x18\xc5\x44\x8e\xbe\x7e\xed\xb1\x6e\xec\x12\x94\x00\x03\xfa\x36\xa5\x21\x26\x3d\x4c\x71\x9b\xea\x2d\xb9\x4d\x09\xb7\xee\x88\xdb\xd4\xeb\x75\x94\xed\x5b\xca\xa6\x17\x77\xc6\x39\x9c\x07\x6d\xdf\x4a\x6d\x9c\xf7\xf7\xf5\xac\xdd\x78\x4c\xcd\xae\xd8\x78\xf9\xff\x45\x9e\xbf\xd4\x77\xde\xbd\x47\x36\x7e\x79\x72\x9b\x9e\x1e\xf6\xd9\x91\x83\x1e\xb0\x5e\xcd\x15\xd7\xa9\xd0\x0d\xd4\x97\x4c\xd0\x09\xa5\x0a\xb4\xce\xd0\x28\xf5\x7f\xb2\x57\xa3\xe7\xa7\x71\xef\x1d\x98\xcf\x52\x6d\x32\x64\x48\xf9\x6c\x00\x86\xd0\x6e\x74\x22\x38\x43\xe1\x31\xa4\x6e\xb3\x09\x04\x0d\xcc\x3b\xdb\x9e\x39\x92\xd4\x30\x3f\x40\xe1\xa0\x8e\x90\x55\xdc\x3f\xdb\x04\xad\x8d\x29\x75\xe6\x5d\x73\xc0\x21\xd9\xc9\xe9\xcb\xd7\x5e\xbb\x85\x91\x0a\xe7\xd0\x5c\xb0\x31\x7b\xdc\x0a\x01\x26\x6b\x6d\xa4\x4c\x1e\x22\xec\x66\x40\x67\xc5\x85\xb3\x62\x4f\x4c\x3b\x95\x1d\x20\x6b\x0b\xf1\xc9\xaf\xd1\x6c\x25\x55\x81\x4d\x86\xae\x26\x8b\x9b\x8b\xf9\x87\xeb\xf9\xc5\xaf\x1f\xde\xcf\x2f\x93\x5f\xee\x89\x2c\x4a\x29\x40\x98\xaf\xd9\x2f\xf7\xdb\x20\xc1\x55\x2a\x1c\x1b\xd0\xa6\x2a\x02\x58\x3f\xa
3\x38\xa1\x4c\x84\x37\x30\x87\x9c\x69\xa3\x76\x95\x91\x32\x44\x25\xd9\x80\x4a\x54\xdc\xa8\x10\xe4\x00\x94\x3d\x1f\x8d\x46\x83\x90\xa7\x82\x71\x63\x8a\x72\x36\xe1\x60\xf6\x5d\x4e\x70\xb2\xb4\x82\x72\x38\xe6\xed\xc8\xf9\x6d\x87\xf7\x88\x82\xcf\x4b\xa9\x4c\x86\xc6\xa3\x93\xe7\xa3\x41\xe3\x93\xb6\x5a\x4e\x09\x5c\x32\x17\x67\x41\x4d\x54\x6e\x0b\x10\x55\x9d\xa9\xac\x30\xac\x80\x84\xb4\xca\xd1\xc4\x51\xeb\xa1\x06\x63\x98\xc8\x75\xba\x79\xe5\x5c\x3e\xdc\x8e\x31\x2f\xd7\x78\xfc\x97\x3a\x5b\xeb\xe0\xb3\x64\x89\xc9\x06\x04\xad\xb8\x1d\xae\x4e\x3b\x04\x05\x50\x86\x13\xb3\x2b\xa1\x39\xa1\xe4\x8c\xf8\xba\x67\xb8\x15\x34\x6d\xa1\xab\x54\xd2\xc8\xa5\x5d\xc5\xec\x28\x2d\x75\x99\x6f\xcb\xea\x94\x9a\xa0\x47\xf8\x8b\x55\xf0\xa8\x45\xd1\xd5\xff\xd1\x10\x0c\x19\x7a\xa2\xf0\x6f\xea\xf6\x1d\x7d\x93\x0e\x7a\x86\x88\x21\xc1\xa7\x0e\x26\xf2\xc4\x79\x28\x59\x39\xeb\x77\x64\x4a\x9f\x32\x86\xc1\x21\xc3\xf0\x7a\x1f\x1d\x14\xb0\x81\xdd\xf7\xf0\x6f\x60\xf7\xe8\xff\x72\xd3\x22\x22\xc0\x0a\x07\x8f\x7a\x61\x3a\xcb\xd0\xfd\xfd\x43\x99\xca\xe3\x8a\x5e\x6c\x99\xcf\xeb\x37\xac\x00\x69\x4d\x86\x84\xe5\xfc\xe1\xaa\x2b\xa2\x35\x56\x3a\x6d\x40\xef\x43\xba\x43\x14\x00\xad\xc9\x1a\xa8\xed\x78\xa8\x3a\xb8\xde\x0a\xc0\x0e\x92\x0e\xd8\xb5\xa6\x4b\x3f\x6a\x5f\x22\xc7\xc2\x40\xbf\x93\x14\x66\x52\x99\x39\x16\xb9\x2b\x94\x1f\xb7\xf6\x16\x76\x29\xc0\xd9\xea\xe5\x49\x7a\xea\x03\xfb\x70\xfc\xc2\xed\xbb\xf2\x9c\x38\xce\x50\x9e\xb9\x36\x2b\x1a\xd7\xab\xed\x01\x04\x31\x97\xfe\x56\xe3\xf8\x2c\xd6\x75\x42\x84\xe2\xa8\xd7\x2f\x61\x42\xa0\x74\xdb\x06\x84\xb9\xd9\x95\x4e\xf0\x77\x3c\x8a\x3f\xb7\x69\xe2\xe5\x10\x5a\x5a\xe5\xe2\xdc\xb3\xd1\x68\x10\xbb\x94\x4a\xea\x77\x09\xf5\x4c\x9f\x4a\x9d\xa1\x13\x2f\x61\xff\x32\xee\xb7\x18\x4c\x82\xd1\xea\x30\x7e\x29\x65\xe9\xde\xff\x1f\x70\xdd\x17\xbf\xfb\xba\xa7\x5e\xc2\xde\x5d\xda\xb7\xed\xd7\xb0\x5e\x60\x78\x83\x11\x05\x33\xbb\xe4\x8c\xbc\x9f\x5f\x66\x9d\xac\x7b\xb4\xba\xca\x5a\x39\xd9\x61\xd1\x3d\x37\x11\xca\x86\x26\x5e\xc7\x68\x12\xcb\x89\xb3\xe9\xf9\xdc\xc5\xf8\x74\x7c\xf2\x2a\x00\xf3\xd9\x1e\x4d\x4c\xfc\x84\x51\xb5\x4f\x8a\x90\x2b\x5d\x03\xc2\x2f\x41\xe4\x66\x9d\xa1\xd7\x2d\x4f\x4f\x67\xad\x93\xa2\xa4\x58\xdd\x0c\x9d\x89\x0e\x73\x47\xad\x67\x7e\xa0\x11\x4a\x7e\x05\x74\x8d\x4d\x53\x43\x25\x72\xab\x13\xed\x39\x9b\xa7\xd6\xbd\x55\xe7\xb9\xc9\x6e\x43\x85\xb5\x06\xf3\x13\x0c\x3c\x24\x52\x68\xc9\x61\x38\x70\xdd\x10\xf6\x40\xad\xa3\x68\x01\x66\x2d\x69\x86\xb0\x35\xae\x24\x61\x14\x84\x61\x66\x37\x8b\xa1\x58\x67\x83\xfb\xfb\x04\xb1\x15\x4a\x2f\x04\x5e\x72\x98\x4c\xce\x27\xd6\xac\x1d\x55\x00\x9a\x8f\x97\x49\xec\xb2\x27\x2e\x0a\xa3\xc9\x79\x80\xe6\x1a\x73\x0e\x3e\xd6\x34\x93\x08\x2e\x73\x26\x5a\x4d\x6f\x81\xcb\x92\x89\xfc\x2a\xaa\x41\x38\x66\x85\xdf\xe8\x26\x83\x23\x63\x86\x50\x86\x5c\x97\x20\xa6\xe7\xd3\x9e\xea\x55\x0b\x15\x42\xf5\xb9\x8f\xfc\xa9\x57\x30\xdc\x3f\x9d\x4c\xce\x63\x1c\x3f\x0f\x51\xbf\x21\x5f\x00\x51\x2e\x1c\x1e\x65\x09\x04\x6d\x36\xcc\x8a\xd6\xf8\x80\xd1\xf6\xc4\x43\xdb\x65\xfd\x55\x2a\x58\x81\x52\x40\xdf\xc7\x8e\xb3\x4d\x68\x05\xfb\x64\xe1\x83\x5b\xae\x57\xfb\x34\x9d\x4d\x28\x30\xe3\xed\x5d\xbf\x10\xbf\xab\x8a\x38\x1a\xd0\x9a\xb5\x54\xec\x0b\x34\x48\xf2\xce\x48\x0b\x46\x94\xd4\x72\x65\xa4\xe0\x4c\xb8\x24\x5a\x0c\xfb\x17\xbf\x01\x81\xa3\xa1\x86\x1e\xa6\x27\xc3\x5a\x5e\x7d\x82\x91\x1b\x10\x3f\x49\xba\x97\xe5\xb1\x07\x82\x76\x30\x76\xe9\xda\x16\x54\x62\xad\x3f\x4b\x45\xfb\x48\xab\x81\xf5\x73\x81\xb6\x3a\x96\x6d\xd7\xc6\x6b\x42\x3b\x80\x7c\x7b\x33\xf3\x8b\xb3\xa8\xe4\x01\x68\xc6\x24\x3a\xd9\x2f\x8f\x7f\x5e\x60\xad\x64\xfd\x5e\x29\x1a\x7a\x53\xdc\x7a\xe9\x0a\xdf\x4d\x72\x58\xb8\x9c\x40\x5d\x4a\xa9\xb2\x52\xdc\x
0e\x61\x51\x6b\xd1\x5e\x0c\x4f\x47\x1f\xaf\x5f\x02\x59\xa2\x03\x5d\xba\xc3\x85\x03\xb4\x07\x44\x5b\x05\x97\x55\xb5\xbe\x71\xcb\x3d\x35\x5e\xbd\xa8\x8a\x81\x1a\xa3\x87\xc8\x9e\x8f\x46\x83\x12\x5b\xed\x50\xd8\x8c\x49\x42\xa8\x2a\x7b\xad\xd2\x52\x4a\xa3\x8d\xc2\x65\xe8\xa1\x8e\x2a\x1f\xf8\xaa\xca\xab\xce\x04\x53\xb1\x52\x58\x1b\x65\x89\xb1\x2a\x94\x52\x25\x26\x9d\x29\x11\x73\x24\x6d\x9e\xc5\x1a\x2b\xa0\xf5\x68\xf2\x10\xd3\xa0\x54\xf2\x23\x90\x56\x40\x8f\x8d\x9a\x2b\xd8\x16\x7e\x32\x25\x55\x86\x84\xa4\x90\x28\xc9\x21\xed\xf4\xb1\x43\xd7\x3a\x5a\x03\x55\x4f\x13\x85\xcd\xc3\x1c\xef\x0a\xb4\xc6\x75\x9d\xd8\xdd\xbb\x81\xa2\x74\xfd\x65\x5d\x44\x12\xab\x98\xd9\x4d\x38\x97\x04\xbb\x23\xc3\x8b\x23\xba\x5e\x89\x35\xa7\x1e\x65\xc3\x93\x6a\xf3\x12\x2f\x81\xeb\x19\xa8\x59\x10\x9e\xa1\xe7\x61\x14\xc7\x68\x9f\x6f\x3c\xaa\x7e\x92\xf1\xeb\xea\x67\xe8\x57\x07\x4a\x5a\xd7\xaa\x35\x36\xd0\x76\x49\x65\x81\xdd\xeb\xbf\xb9\xb8\x9a\xcd\xaf\xdf\xdf\x5c\xcc\xa7\xb3\x54\xb0\xd2\xf5\xdd\x31\x0f\x4f\x08\x71\xed\x41\xc3\xe6\xc7\xff\x01\x9c\x73\x17\xa2\x41\x10\xd0\x4d\xea\x2a\xb0\xc0\x39\xd0\x7a\x4a\x99\x54\xb6\xf6\xbf\xfb\x29\xb0\x7f\xd8\x6e\xbd\xe4\x72\xf7\xc0\x2b\x2f\x15\xdb\x62\x03\xbf\xf5\xa6\x77\x38\x68\xe5\xea\x35\xbf\x5f\xf5\xb9\x3e\x1a\x44\xe2\x78\xfc\x1e\x87\xa7\xf1\x0c\x3a\x4c\x15\xab\xfe\xe4\xe0\xdc\xe6\xe0\x4b\xdf\x9f\xe2\x1c\xe8\x6d\xb4\xef\xa5\xab\x66\xbe\x1e\xf1\xd6\xd3\x9c\x7e\x9b\x13\xe9\xc3\x4d\x0a\x7c\x17\x41\xa4\xa7\xe2\x0d\x67\xf9\xda\x84\x97\x58\x0f\x90\x63\xc3\xd5\x0d\x2a\x5b\xc9\x6d\xd1\x9a\x87\xd0\x9d\xc0\x05\x23\x3e\xa0\xba\x68\xc1\x44\x1e\xea\x13\x1a\x43\xfe\x7f\x03\x00\x00\xff\xff\x2a\xb2\x27\xe9\x7c\x1a\x00\x00") func masterEtcOriginMasterMasterConfigYamlBytes() ([]byte, error) { return bindataRead( @@ -199,7 +199,7 @@ func masterTmpAnsibleAnsibleSh() (*asset, error) { return a, nil } -var _masterTmpAnsibleAzureLocalMasterInventoryYml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\x6d\x6f\xdb\xc8\x11\xfe\xee\x5f\x31\x90\x03\xa8\x45\x23\xa9\x49\x0e\x68\x8f\x40\x3e\xf8\x7c\xcc\xd9\xb8\xb3\x65\x48\x4a\x91\xb4\x28\x88\x25\x77\x28\x6e\xbd\xda\xe1\xed\x2e\x25\xeb\x0c\xff\xf7\x62\xb8\x4b\xbd\xd0\x2f\xf1\x45\x9f\x44\x71\xe6\x99\x67\xe7\x7d\x35\x1a\x8d\x4e\x4e\xe1\xd3\xe5\x97\xab\x34\x81\x33\xb3\xf5\x95\x32\x4b\xc8\x51\xd3\x06\x94\x01\xa1\xf5\xa8\x10\xb5\x83\x4a\x38\x50\x7e\xe8\x60\x2d\x74\x83\x60\xb1\xd6\xa2\x40\x09\xf9\x16\x1c\x4a\x16\xf5\x15\x9e\x9c\x42\xfc\x50\x8d\xc6\x55\xaa\xf4\x2b\xe1\x3c\x5a\x57\x58\x55\xfb\xb1\xab\x60\x53\xa9\xa2\x02\xe5\xc0\x90\x07\x25\x51\xe8\xb7\xb0\x41\x70\x15\x35\x5a\x42\xa9\xee\xc0\x57\xc2\x8f\x4f\x34\x15\x42\x07\xe5\xe4\x04\xa0\x22\xe7\x1d\x7f\x01\x68\xdf\xf0\xf3\x09\xc0\x5a\xd8\xf8\xab\x30\x4e\xe5\x1a\xb3\x82\x8c\xc1\xc2\x2b\x32\x49\x90\x3c\x7a\x5b\x6f\x7d\x45\x26\x53\xc6\xa3\xad\x2d\x32\x38\x4c\x1a\x67\x27\xb9\x32\x93\xf0\xf2\xfd\xc9\xc9\xd1\x01\x32\xe5\x18\xd4\x0b\x65\xd0\xaa\x3f\x50\x26\xf0\x49\x68\x87\x3d\xa9\x42\x2b\x34\x3e\xcb\x95\x11\x76\x7b\x00\x4a\xc5\x23\xbc\x95\x58\x62\xe6\xc5\x32\x81\xc1\xfa\x5f\xe9\x6c\x7e\x39\xbd\x1e\x04\xa1\x53\xb8\x9e\x2e\xd2\x04\x16\x95\x72\x50\x54\xc2\x2c\xd1\xb1\x6b\x3f\x8c\xdf\xfd\xfd\x6f\x20\x8c\x0c\x21\x10\x5a\x43\x41\xab\x5c\x99\xd6\xf3\x9e\x40\x80\x53\x66\xa9\x91\x1d\xa2\x44\xae\x31\xc2\x85\x4f\x63\x24\x5a\x8e\x0f\x18\xb1\xc2\x03\x2a\x1b\xcc\xf9\x6c\x8e\x34\x46\x5a\x2c\xd0\xe3\x7b\x28\x54\x5b\x2c\xd5\x5d\x02\x83\xcb\xab\xb3\x5f\xd2\xec\x66\x96\x7e\xba\xfc\x32\x19\xbc\xa0\xb1\x87\xdd\x69\x2d\xbe\xde\xa4\xa3\x0d\xe6\xa3\x28\xf3\x92\xfa\x1a\xad\x6b\x63\xd9\x77\x15\x59\x5c\x66\x8d\xd5\x59\xcc\x11\x18\x1e\x51\x3a\xb0\xf4\xe6\xbe\xa0\x55\x4d\x06\x8d\x7f\x48\xde\xdc\x47\xc4\x87\x61\xcf\x6a\xc0\xc9\x24\x96\xa2\xd1\x3e\x73\x4d\x2e\x69\x25\x94\x49\x60\xb8\x48\xaf\x6e\x66\xd3\xcf\x8b\x74\x76\x79\x33\x36\xaa\x1e\x2b\x1a\x76\x01\x6b\x2b\x27\x7e\x6f\xc3\x16\x33\x39\x47\xf0\xdb\x1a\x3f\x2a\x53\x5a\xf1\x16\xf2\xc6\x73\x9a\x57\x62\x8d\xe0\x09\xb4\x5a\x23\x6c\x94\xaf\xc0\xe2\x52\x91\x09\x62\x50\x92\x05\x43\x9b\x08\x97\x63\x21\x1a\x87\x40\x25\x68\x5c\x8a\x62\x0b\x16\x85\x23\xe3\x7a\xcc\x2d\x35\xcc\xdc\xa1\xc6\xc2\x13\xbb\xe2\x10\xb4\x7f\x4e\x7e\xe7\xbc\xdd\xbe\x2c\x7f\x0a\xd7\xd4\x96\x27\x6c\xaa\x2d\x78\x3e\x98\x72\x20\x40\xaa\xb2\x44\x8b\xc6\x83\x14\x5e\xb4\x47\x0c\x87\x53\x1e\x54\x9f\x58\x6d\x69\x85\xbe\xc2\xc6\x65\x86\x24\x1e\x58\xbc\x8f\x26\x87\x09\x0c\x83\xd5\x87\xce\xa1\x73\xf4\x70\xf6\x47\x63\x11\x5c\x8d\x85\x2a\x55\x11\x4c\xb1\x6f\xb8\xea\x51\x42\x77\x84\x9e\xb9\xf0\xf6\xe0\x80\x9e\x2c\x27\x5f\x6d\x69\xad\x24\xe7\xc8\x40\x30\x70\x96\x6b\xca\xfb\x49\xf7\x9c\xf2\xad\x32\x32\x81\x01\xe5\xff\xc3\xc2\xbf\x56\x69\x6f\x26\x13\x45\x41\x8d\xf1\xa1\x00\x86\xb3\xf4\x97\xcb\xf9\x62\xf6\x35\x9b\x2f\xa6\x33\xce\xd0\xb3\x7f\x7f\x9e\xa5\xd9\xd9\xf9\xf9\xf4\xf3\xf5\xe2\xfa\xec\x2a\xed\x87\xeb\xf5\x26\x6e\x71\xfb\x4d\x0b\xbf\xa6\x5f\xbf\xc3\xc0\xae\xfd\x25\x30\xe8\xe4\xbe\xc3\x15\x16\x85\x5e\x25\x30\x28\xc8\xe2\x78\xa3\x8c\xa4\x8d\x1b\x1b\xf4\x83\x27\x6a\x29\xfe\x72\x21\xac\x84\x82\x24\x86\x14\x8c\xf5\x31\x3e\x92\x39\x6f\x2c\x27\xa4\xde\xb6\x1d\xae\xd0\x0d\xd7\x31\x38\x2f\x3c\x82\xf0\x20\xb1\xd6\xb4\x5d\x71\xca\x7a\xb5\x42\x90\x84\x61\xf8\x84\x5a\xe4\xa6\x48\x12\x5d\x04\x0b\xfc\xd1\xa2\xe4\x32\xe5\xb7\xa1\x2f\x84\x62\xd5\x22\x47\xed\x40\xd4\xb5\x56\x28\xdb\x86\x6c\x51\xc8\x2d\xcb\xe6\x08\xbf\x37\x68\x15\xca\x08\x25\x96\x42\x19\xe7\x99\x03\xe3\xd4\xa4\x8c\x6f\xe7\x24\xb3\x08\xf3\x32\x92\x6b\x47\x5e\x10\xd2\x62\x9b\x13\xdd\x3a\xb0\x8d\x19\xc3\x99\x76\xf4\x36\xc2\xf1\xeb\x50\xe
9\xed\xf0\x55\x85\x80\x36\xec\x10\x3b\x16\x0c\x4a\xa1\xb5\x83\x5c\x14\xb7\x03\x26\xf4\x8e\x79\x5a\xaa\xad\x12\x1e\xf5\x16\x36\x15\x5a\x04\xe1\x0e\xf1\x62\xb4\x76\x88\x9a\x96\x5c\x6f\xd1\x45\x63\x58\xb4\x3a\x1b\xc1\x33\xc7\x11\x48\xe5\x8a\xc6\x71\xfb\x04\x91\x13\x57\x7d\x19\xd1\xc2\x5c\xda\xdb\x63\x02\x92\x42\xd4\x62\x9f\x6b\x2d\x38\xf8\xf8\x11\x42\xb3\x6b\xdd\xbe\x6b\x72\x0c\x10\xb1\x6a\xb4\x25\x16\x1c\xd0\x12\x45\x3b\xb7\x19\xad\x0d\x97\x30\x07\xca\xf1\xfc\x95\x5a\x56\xed\x78\x13\x9d\x4f\x9d\xb2\xbb\x28\x74\x76\x8f\x73\x66\x86\x6b\xe5\x94\x07\x2d\xd8\x9f\x7f\xa9\xc9\xb1\x9d\x2d\x37\x38\x81\x2b\x32\x0e\x3d\x90\x85\x37\xe4\x2b\xb4\x7f\x7d\x26\xd5\x43\xdb\xed\x0c\x24\xf0\xee\x1b\x25\x71\x28\xf9\x7c\xa7\x6c\x13\x37\x81\x41\x6d\xd1\xa1\x79\xd4\x72\xfa\x3d\x15\xef\x6a\xb2\xcc\x23\x0c\xda\xdd\xbc\x1c\xde\xcc\xa6\x57\xe9\xe2\x22\xfd\x3c\xcf\xd2\x2f\x37\xd3\xd9\x22\x9d\x65\x71\x84\x0e\xfb\xe6\x8f\x06\xb6\x71\x5e\x68\x9d\xc0\xc2\x36\xf8\x02\xcf\x60\xef\x75\x0b\xc1\xbe\x06\x33\x9e\x18\x09\x0c\xcf\xae\xe7\x97\x3f\xfd\x96\x66\x3f\xa7\x37\xbf\x4d\xbf\xb6\x83\x3a\xb2\xea\x56\x35\x87\x76\xad\x0a\xcc\x72\x4b\xb7\x7c\xbc\x23\x5a\xc7\xf0\x9d\x68\x21\xbc\xd0\xb4\x7c\x91\xda\xc1\x62\xd0\x67\xf9\x34\xcc\xde\xa3\xeb\x9d\xfb\x9e\xdc\x1c\xd0\x17\x92\x37\x12\x97\xc0\x7f\x06\x95\xf7\xb5\x4b\x26\x93\x8b\xe9\xbc\x6d\xeb\xc9\xfb\x0f\xff\xf8\x71\xf0\xdf\x70\x44\x8f\xab\x9a\x33\xaf\x7f\xc6\x97\x9c\xf9\x9c\xce\x73\xfb\x55\x27\x3f\x8a\xf2\xa3\x20\xff\x32\xd8\xb3\xdb\xd6\xd1\x62\xaa\x1c\x58\x94\x8d\x91\xc2\xf8\x30\xfc\x2d\xfe\xde\xa8\xd8\x33\x2b\x61\x24\x2f\xa2\x01\x0a\xdc\x2d\x6e\x20\x47\xbf\x41\x34\x47\x4b\xe9\xce\x7f\xa3\x18\x71\xee\x89\x53\xab\x96\x7c\xeb\x30\x12\xa6\xe7\x37\x3d\x3f\xe3\x9d\x58\xd5\x1a\xc3\x3a\xce\xb9\x74\x40\x77\x7e\x31\x9d\x2d\x38\xbf\x07\xfd\x94\x2d\xa8\xb8\xad\x55\x97\x83\x87\x5e\x3e\x9f\x9e\xff\x7a\x73\xb9\x78\x2e\x69\x1f\x29\xe6\xc2\x61\x74\x74\xa7\xfa\xd3\xd9\x3c\xe5\xf0\x7e\x53\x77\x4f\xb5\x53\x7d\xda\xc1\x3f\x53\x3b\x9b\x24\x96\xca\xe0\x37\x7b\x0e\x38\xda\xcf\x0e\x2f\xdc\x2d\x94\x6a\xb7\xfb\x07\xd9\xf1\x76\xa5\x41\x19\xa7\x64\x18\x76\x7d\x48\xb0\xa4\x11\x4a\x4b\xab\x27\x22\xb2\x51\x5a\x77\xd3\xac\xf1\x54\x53\xdd\x70\xda\xf0\xb2\xd7\xf0\x85\xe3\x49\xc4\xfe\x3e\xda\x46\x93\x47\xe3\x56\x99\xe5\xc1\xf0\x31\xcd\x2a\x47\xcb\xdb\xed\xc1\x3c\xe8\xa7\xdb\xfe\x46\xe3\xe2\xa5\x54\x58\xce\x14\x8f\xd6\x08\xcd\xf9\xf6\x98\x35\xdb\xdb\xe0\xd0\x1e\x5f\x82\xc2\x16\x18\x49\xaf\xa0\x1d\x6c\x7c\x73\x5a\x93\x92\x2d\x1f\x65\x0a\xde\x24\x78\xdf\x70\x9e\x69\x95\xa2\xf0\x50\x2a\x23\x3b\xde\x3b\x53\xe1\xca\x09\x7c\x09\x5b\x91\xe9\x9e\xf8\xd9\x94\x6a\xd9\x26\x4a\x02\x13\xf4\xc5\x84\xda\x84\x9e\xec\x24\x5e\x99\xc3\x41\x78\x7f\xfd\x8d\x47\x68\x72\xad\x8a\x5d\xa7\x6e\xac\x4e\x60\xd7\x69\xee\xef\x61\x9c\xde\x05\xbf\x5c\xb5\x8a\x17\xe4\xda\xbd\x13\x1e\x1e\x92\x7f\xfe\xf0\xc3\x87\xc9\xd1\x7d\xeb\x00\x50\xd4\xea\x4f\x83\xed\x41\x5e\xd3\xf7\xe0\x34\xfe\xc3\x70\x4d\xa0\xc9\x2c\xd1\x82\x41\x94\xbc\x43\x39\x76\xf5\xa3\x20\x8e\x3e\x8c\x7f\x1c\xbf\x7f\x3f\x7a\x17\x6f\x4e\x43\x8b\x6d\x61\x90\x09\x09\xdf\xb5\x97\x2d\xfa\x48\x84\xd3\x67\xef\x2a\x7e\x8a\x3b\x77\x47\x65\x78\x72\x42\x14\x9b\xb5\xcb\x3c\x65\x21\x5a\xcf\xfe\xbb\xf0\xff\x00\x00\x00\xff\xff\xce\xfa\x8f\x78\x1a\x11\x00\x00") +var _masterTmpAnsibleAzureLocalMasterInventoryYml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\x6d\x6f\xdb\xc8\x11\xfe\xee\x5f\x31\x90\x03\xa8\x45\x23\xa9\x49\x0e\x68\x8f\x40\x3e\xe8\x7c\x4c\x6c\x5c\x6c\x19\x92\x5c\x24\x2d\x0a\x62\xc9\x1d\x8a\x5b\xaf\x76\x78\xbb\x4b\xc9\x3a\xc3\xff\xbd\x18\xee\xea\x8d\x7e\x89\x2f\xfa\x24\x8a\x33\xcf\xcc\xce\x3c\xf3\xb2\x1a\x0c\x06\x27\xa7\xf0\xe9\xe2\xeb\x65\x9a\xc0\xd8\x6c\x7c\xa5\xcc\x02\x72\xd4\xb4\x06\x65\x40\x68\x3d\x28\x44\xed\xa0\x12\x0e\x94\xef\x3b\x58\x09\xdd\x20\x58\xac\xb5\x28\x50\x42\xbe\x01\x87\x92\x45\x7d\x85\x27\xa7\x10\x3f\x54\xa3\x71\x95\x2a\xfd\x52\x38\x8f\xd6\x15\x56\xd5\x7e\xe8\x2a\x58\x57\xaa\xa8\x40\x39\x30\xe4\x41\x49\x14\xfa\x2d\xac\x11\x5c\x45\x8d\x96\x50\xaa\x3b\xf0\x95\xf0\xc3\x13\x4d\x85\xd0\x41\x39\x39\x01\xa8\xc8\x79\xc7\x5f\x00\xda\x37\xfc\x7c\x02\xb0\x12\x36\xfe\x2a\x8c\x53\xb9\xc6\xac\x20\x63\xb0\xf0\x8a\x4c\x12\x24\x8f\xde\xd6\x1b\x5f\x91\xc9\x94\xf1\x68\x6b\x8b\x0c\x0e\xa3\xc6\xd9\x51\xae\xcc\x28\xbc\x7c\x7f\x72\x72\x74\x80\x4c\x39\x06\xf5\x42\x19\xb4\xea\x0f\x94\x09\x7c\x12\xda\x61\x47\xaa\xd0\x0a\x8d\xcf\x72\x65\x84\xdd\x1c\x80\x52\xf1\x08\x6f\x29\x16\x98\x79\xb1\x48\xa0\xb7\xfa\x57\x3a\x9d\x5d\x4c\xae\x7a\x41\xe8\x14\xae\x26\xf3\x34\x81\x79\xa5\x1c\x14\x95\x30\x0b\x74\x1c\xda\x0f\xc3\x77\x7f\xff\x1b\x08\x23\x43\x0a\x84\xd6\x50\xd0\x32\x57\xa6\x8d\xbc\x27\x10\xe0\x94\x59\x68\xe4\x80\x28\x91\x6b\x8c\x70\xe1\xd3\x18\x89\x96\xf3\x03\x46\x2c\xf1\xc0\x95\x35\xe6\x7c\x36\x47\x1a\xa3\x5b\x2c\xd0\xf1\xf7\x50\xa8\xb6\x58\xaa\xbb\x04\x7a\x17\x97\xe3\xcf\x69\x76\x3d\x4d\x3f\x5d\x7c\x1d\xf5\x5e\xd0\xd8\xc3\xee\xb4\xe6\xdf\xae\xd3\xc1\x1a\xf3\x41\x94\x79\x49\x7d\x85\xd6\xb5\xb9\xec\x86\x8a\x2c\x2e\xb2\xc6\xea\x2c\x72\x04\xfa\x97\xe3\xd9\x3c\x9d\x66\x93\x69\xfa\x39\xbb\x99\x7e\x19\xbc\xb9\x2f\x68\x59\x93\x41\xe3\x1f\x92\x37\xf7\x11\xe9\xa1\xdf\xb1\x16\xf4\x33\x89\xa5\x68\xb4\xcf\x5c\x93\x4b\x5a\x0a\x65\x12\xe8\xcf\xd3\xcb\xeb\xe9\xe4\x66\x9e\x4e\x2f\xae\x87\x46\xd5\x43\x45\xfd\x6d\xa2\xda\x8a\x89\xdf\xdb\x74\x45\x06\xe7\x08\x7e\x53\xe3\x47\x65\x4a\x2b\xde\x42\xde\x78\xa6\x77\x25\x56\x08\x9e\x40\xab\x15\xc2\x5a\xf9\x0a\x2c\x2e\x14\x99\x20\x06\x25\x59\x30\xb4\x8e\x70\x39\x16\xa2\x71\x08\x54\x82\xc6\x85\x28\x36\x60\x51\x38\x32\xae\xe3\xb9\xa5\x86\x3d\x77\xa8\xb1\xf0\xc4\x21\x38\x04\xed\x9e\x93\xdf\x39\x6f\x37\x2f\xcb\x9f\xc2\x15\xb5\x65\x09\xeb\x6a\x03\x9e\x0f\xa6\x1c\x08\x90\xaa\x2c\xd1\xa2\xf1\x20\x85\x17\xed\x11\xc3\xe1\x94\x07\xd5\x75\xac\xb6\xb4\x44\x5f\x61\xe3\x32\x43\x12\x0f\x2c\xde\x47\x93\xfd\x04\xfa\xc1\xea\xc3\x36\xa0\x33\xf4\x30\xfe\xa3\xb1\x08\xae\xc6\x42\x95\xaa\x08\xa6\x38\x36\x5c\xed\x28\x61\x7b\x84\x8e\xb9\xf0\xf6\xe0\x80\x9e\x2c\x93\xae\xb6\xb4\x52\x92\xb9\xd1\x13\x0c\x9c\xe5\x9a\xf2\x2e\xd9\x9e\x53\xbe\x55\x46\x26\xd0\xa3\xfc\x7f\x58\xf8\xd7\x2a\xed\xcd\x64\xa2\x28\xa8\x31\x3e\x10\xbf\x3f\x4d\x3f\x5f\xcc\xe6\xd3\x6f\xd9\x6c\x3e\x99\x72\x0d\x8c\xff\x7d\x33\x4d\xb3\xf1\xd9\xd9\xe4\xe6\x6a\x7e\x35\xbe\x4c\xbb\xe9\x7a\xbd\x89\x5b\xdc\x7c\xd7\xc2\x6f\xe9\xb7\x1f\x30\xb0\x6b\x7b\x09\xf4\xb6\x72\x3f\x10\x0a\x8b\x42\x2f\x13\xe8\x15\x64\x71\xb8\x56\x46\xd2\xda\x0d\x0d\xfa\xde\x13\xb5\x14\x7f\x39\x17\x56\x42\x41\x12\x03\x05\x63\x7d\x0c\x8f\x64\xce\x1a\xcb\x84\xd4\x9b\xb6\xb3\x15\xba\xe1\x3a\x06\xe7\x85\x47\x10\x1e\x24\xd6\x9a\x36\x4b\xa6\xac\x57\x4b\x04\x49\x18\x86\x4e\xa8\x45\x6e\x86\x24\xd1\x45\xb0\xe0\x3f\x5a\x94\x5c\xa6\xfc\x36\xf4\x85\x50\xac\x5a\xe4\xa8\x1d\x88\xba\xd6\x0a\x65\xdb\x88\x2d\x0a\xb9\x61\xd9\x1c\xe1\xf7\x06\xad\x42\x19\xa1\xc4\x42\x28\xe3\x3c\xfb\xc0\x38\x35\x29\xe3\xdb\xf9\xc8\x5e\x84\x39\x19\x9d\x6b\x47\x5d\x10\xd2\x62\x93\x13\xdd\x3a\xb0\x8d\x1
9\xc2\x58\x3b\x7a\x1b\xe1\xf8\x75\xa8\xf4\x76\xe8\xaa\x42\x40\x9b\x76\x88\x1d\x0b\x7a\xa5\xd0\xda\x41\x2e\x8a\xdb\x1e\x3b\xf4\x8e\xfd\xb4\x54\x5b\x25\x3c\xea\x0d\xac\x2b\xb4\x08\xc2\x1d\xe2\xc5\x6c\xed\x10\x35\x2d\xb8\xde\x62\x88\x86\x30\x6f\x75\xd6\x82\x67\x8d\x23\x90\xca\x15\x8d\xe3\xf6\x09\x22\x27\xae\xfa\x32\xa2\x85\x79\xb4\xb7\xc7\x0e\x48\x0a\x59\x8b\x7d\xae\xb5\xe0\xe0\xe3\x47\x08\xcd\xae\x0d\xfb\xae\xc9\x31\x40\xc4\xaa\xd1\x96\x58\x70\x42\x4b\x14\xed\xbc\x66\xb4\x36\x5d\xc2\x1c\x28\xc7\xf3\x57\x6a\x51\xb5\x63\x4d\x6c\x63\xea\x94\xdd\x65\x61\x6b\xf7\x98\x33\x53\x5c\x29\xa7\x3c\x68\xc1\xf1\xfc\x4b\x4d\x8e\xed\x6c\xb8\xc1\x09\x5c\x92\x71\xe8\x81\x2c\xbc\x21\x5f\xa1\xfd\xeb\x33\x54\x0f\x6d\x77\x6b\x20\x81\x77\xdf\x29\x89\x43\xc9\xe7\x3b\x65\x4b\xdc\x04\x7a\xb5\x45\x87\xe6\x51\xcb\xe9\xf6\x54\xbc\xab\xc9\xb2\x1f\x61\xc0\xee\xe6\x64\xff\x7a\x3a\xb9\x4c\xe7\xe7\xe9\xcd\x2c\x4b\xbf\x5e\x4f\xa6\x3c\x13\xe3\xe8\xec\x77\xcd\x1f\x0d\x6a\xe3\xbc\xd0\x3a\x81\xb9\x6d\xf0\x05\x3f\x83\xbd\xd7\x2d\x02\xfb\x1a\xcc\x78\x62\x24\xd0\x1f\x5f\xcd\x2e\x7e\xf9\x92\x66\xbf\xa6\xd7\x5f\x26\xdf\xda\x55\x20\x7a\xb5\x5d\xd1\x1c\xda\x95\x2a\x30\xcb\x2d\xdd\xf2\xf1\x8e\xdc\x3a\x86\xdf\x8a\x16\xc2\x0b\x4d\x8b\x17\x5d\x3b\x58\x3d\xba\x5e\x3e\x0d\xb3\x8f\xe8\x6a\x17\xbe\x27\x37\x07\xf4\x85\xe4\x4d\xc4\x25\xf0\x9f\x5e\xe5\x7d\xed\x92\xd1\xe8\x7c\x32\x6b\xdb\x7a\xf2\xfe\xc3\x3f\x7e\xee\xfd\x37\x1c\xd1\xe3\xb2\x66\xe6\x75\xcf\xf8\x52\x30\x9f\xd3\x79\x6e\xaf\xda\xca\x0f\xa2\xfc\x20\xc8\xbf\x0c\xf6\xec\x96\x75\xb4\x90\x2a\x07\x16\x65\x63\xa4\x30\x3e\x0c\x7f\x8b\xbf\x37\x2a\xf6\xcc\x4a\x18\xc9\x0b\x68\x80\x02\x77\x8b\x6b\xc8\xd1\xaf\x11\xcd\xd1\x32\xba\x8b\xdf\x20\x66\x9c\x7b\xe2\xc4\xaa\x05\xdf\x36\x8c\x84\xc9\xd9\x75\x27\xce\x78\x27\x96\xb5\xc6\xb0\x86\x33\x97\x0e\xdc\x9d\x9d\x4f\xa6\x73\xe6\x77\xaf\x4b\xd9\x82\x8a\xdb\x5a\x6d\x39\x78\x18\xe5\xb3\xc9\xd9\x6f\xd7\x17\xf3\xe7\x48\xfb\x48\x31\x17\x0e\x63\xa0\xb7\xaa\xbf\x8c\x67\x29\xa7\xf7\xbb\xba\x7b\x57\xb7\xaa\x4f\x07\xf8\x57\x6a\x67\x93\xc4\x52\x19\xfc\x6e\xcf\x01\x47\xfb\xd9\xe1\x85\xbb\x85\x52\xed\x76\xfe\x20\x3b\xdc\x2c\x35\x28\xe3\x94\x0c\xc3\xae\x0b\x09\x96\x34\x42\x69\x69\xf9\x44\x46\xd6\x4a\xeb\xed\x34\x6b\x3c\xd5\x54\x37\x4c\x1b\x5e\xf6\x1a\xbe\x68\x3c\x89\xd8\xdd\x47\xdb\x6c\xf2\x68\xdc\x28\xb3\x38\x18\x3e\xa6\x59\xe6\x68\x79\xbb\x3d\x98\x07\x5d\xba\xed\x6f\x32\x2e\x5e\x46\x85\x65\xa6\x78\xb4\x46\x68\xe6\xdb\x63\xaf\xd9\xde\x1a\xfb\xf6\xf8\xf2\x13\xb6\xc0\xe8\xf4\x12\xda\xc1\xc6\x37\xa6\x15\x29\xd9\xfa\xa3\x4c\xc1\x9b\x04\xef\x1b\xce\xb3\x5b\xa5\x28\x3c\x94\xca\xc8\xad\xdf\x3b\x53\xe1\xaa\x09\x7c\xf9\x5a\x92\xd9\x3e\xf1\xb3\x29\xd5\xa2\x25\x4a\x02\x23\xf4\xc5\x88\x5a\x42\x8f\x76\x12\xaf\xe4\x70\x10\xde\x5f\x7b\xe3\x11\x9a\x5c\xab\x62\xd7\xa9\x1b\xab\x13\xd8\x75\x9a\xfb\x7b\x18\xa6\x77\x21\x2e\x97\xad\xe2\x39\xb9\x76\xef\x84\x87\x87\xe4\x9f\x3f\xfd\xf4\x61\x74\x74\xcf\x3a\x00\x14\xb5\xfa\xd3\x60\x7b\x90\xd7\xf4\x3d\x38\x8d\xff\x2c\x5c\x11\x68\x32\x0b\xb4\x60\x10\x25\xef\x50\x8e\x43\xfd\x28\x89\x83\x0f\xc3\x9f\x87\xef\xdf\x0f\xde\xc5\x9b\x53\xdf\x62\x5b\x18\x64\x02\xe1\xb7\xed\x65\x83\x3e\x3a\xc2\xf4\xd9\x87\x8a\x9f\xe2\xce\xbd\x75\xa5\x7f\x72\x42\x14\x9b\xb5\xcb\x3c\x65\x21\x5b\xcf\xfe\xab\xf0\xff\x00\x00\x00\xff\xff\x14\x49\xa7\xb3\x12\x11\x00\x00") func masterTmpAnsibleAzureLocalMasterInventoryYmlBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml index 9a62edd9dc..80bd0df15d 100644 
--- a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml +++ b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml @@ -87,7 +87,7 @@ etcdStorageConfig: openShiftStoragePrefix: openshift.io openShiftStorageVersion: v1 imageConfig: - format: IMAGE_PREFIX/IMAGE_TYPE-${component}:${version} + format: MASTER_OREG_URL-${component}:${version} latest: false imagePolicyConfig: internalRegistryHostname: docker-registry.default.svc:5000 diff --git a/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml b/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml index 790f0c7d52..56f8c5d5a0 100644 --- a/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml +++ b/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml @@ -19,7 +19,7 @@ localmaster: openshift_web_console_image_name: "IMAGE_TYPE-web-console" openshift_web_console_version: "vVERSION" - oreg_url_master: 'IMAGE_PREFIX/IMAGE_TYPE-${component}:${version}' + oreg_url_master: 'MASTER_OREG_URL-${component}:${version}' openshift_master_default_subdomain: 'TEMPROUTERIP.nip.io' # FIXME diff --git a/pkg/openshift/certgen/unstable/templates/bindata.go b/pkg/openshift/certgen/unstable/templates/bindata.go index 0046d9424c..ffc7da2d85 100644 --- a/pkg/openshift/certgen/unstable/templates/bindata.go +++ b/pkg/openshift/certgen/unstable/templates/bindata.go @@ -118,7 +118,7 @@ func masterEtcOriginMasterHtpasswd() (*asset, error) { return a, nil } -var _masterEtcOriginMasterMasterConfigYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x7b\x6f\x1b\xb9\x11\xff\x5f\x9f\x82\x08\x0e\x48\x52\x74\x77\x25\x3b\xcf\x05\x8a\x42\xb5\x9d\x8b\x70\x76\xa2\xca\x4e\x71\x45\x5d\x1c\x28\x72\xb4\x62\xc4\x25\x37\x7c\x28\x56\xdc\x7c\xf7\x82\x8f\x7d\x4a\x4a\xd2\x5c\xd0\x73\x02\xc4\x4b\xfe\x66\x38\xe4\xfc\x38\x0f\x06\xd3\x92\x69\xcd\xa4\x38\x93\x62\xc5\x8a\x7c\x84\x50\xc5\x6d\xc1\x3a\xdf\x08\xfd\xcd\x32\x4e\xcf\x61\x85\x2d\x37\x3a\x0c\x21\x44\x3c\xc0\x2a\x6c\x98\x14\xf5\x20\x42\xb8\x62\xff\x00\xe5\x34\xe6\x68\x3b\x69\x86\x41\x6c\x73\xf4\xaf\x7f\x37\xdf\x1b\x26\x68\xde\x57\x1c\x56\x6c\x10\x0a\xb4\xb4\x8a\x80\x6e\x75\x23\xc4\x59\xc9\x8c\xce\xd1\xfd\xe7\xce\xa0\x82\x0f\x16\x74\x67\xd8\xab\x7d\xbb\x05\xa5\x18\x85\xef\x34\xb8\x63\x60\xa3\xa9\x63\xe1\x5c\xd2\xb9\x02\x0d\xe6\xfb\xb4\x53\xa6\xf1\x92\x43\x8e\x56\x98\x6b\x18\x2c\x1a\x0f\x64\xda\x77\x8d\x07\xc9\x0a\x84\x5e\xb3\x95\x49\x99\xcc\x66\x25\x2e\x60\x2e\x39\x23\xbb\xef\x74\xca\x1d\x10\xeb\x90\x0b\xcb\xbb\xe7\x9c\xa0\x12\x1b\xb2\xf6\xfa\xa7\x42\x48\xe3\xd5\xf5\x1c\x91\xa0\x0d\xec\x72\xc4\x1c\x44\xa7\x3d\xb3\x28\x88\x5d\xd2\xa8\xee\xc8\x20\xb4\xc5\xdc\x42\x8e\x1e\x1a\x65\xe1\x61\x67\x46\xe0\x12\xf2\xd6\x9c\x84\x82\x60\x40\x3b\x00\x29\x16\x87\xe8\x90\x34\x2c\xc9\x51\x25\xa9\x3e\x32\xb5\x74\x5e\xd4\x3d\xc6\xbc\x07\x62\x72\xe4\xec\xe8\x0c\xeb\x0d\xab\xde\xfa\x95\xb8\xb7\xe3\x15\x66\xdc\x2a\x18\xe0\x82\x93\x3a\x87\x1f\xfd\x83\x8b\x42\x41\x81\x8d\x54\x9d\xbb\xa4\xe4\xdd\xee\x8c\x33\x10\x66\x26\x56\x32\xd8\x4e\x40\x99\x57\xcc\x79\xbf\x15\x49\x56\x4a\x0a\x93\x78\x7c\x4a\x94\xf1\xc0\x0d\xec\xbe\x88\xdb\xc0\x6e\x84\x2b\x76\x09\x5b\xe0\x3a\x1f\x25\xce\xb7\x03\x57\x63\x6b\xd6\xad\x39\xf1\xa6\xbc\x06\x4c\x41\x45\x63\xbc\x71\x67\xd3\x1c\x75\x34\x27\x04\x37\x46\x44\x80\x2c\x4b\x29\xde\xe0\xb2\x76\x40\x72\xc4\xa8\x51\x20\x96\x51\x38\xac\x32\x57\xb0\x62\x77\xad\xd4\xaf\xc9\x02\x4a\x69\x20\xb9\x70\x98\xc4\x8f\x16\x4a\
xda\x2a\xc0\xf7\x71\x3f\xbb\x49\x3f\x68\x35\x28\xc7\x94\x63\xc8\x77\x1a\xd4\x88\x48\x61\x94\xe4\x1c\x3a\x5e\x00\x0e\xa4\xbd\x10\x5c\x92\xcd\x1b\x4f\xb8\x86\xb6\x49\x89\xb5\x01\x95\xb4\xc2\x8e\x2d\x1a\xd4\x96\x11\xb8\x76\xff\x88\xe2\x0c\x54\xbc\xec\x9a\x15\xa2\x3e\xbe\xae\x37\x23\x3e\x09\xf3\xcd\x01\x76\xfc\x38\x40\x38\xff\x75\x96\xcc\xd1\xc3\x3f\x3d\x1c\x11\xa9\xf4\x94\x73\xf9\x11\xe8\x5b\xc5\x0a\x26\xbc\x67\x1f\xfd\x95\x3d\xce\xb2\xc9\xc9\xf3\xdb\x74\xec\xff\x4e\x1e\xe5\xff\xb9\xfd\xf4\xb8\x99\xe2\x92\x60\xbe\x96\xda\x0c\xc6\xef\xef\xd1\xdf\xad\x34\x70\x05\x06\xa3\x47\x4c\x50\xb8\x43\xe9\x95\xdf\x6e\x3a\x9b\x6b\x34\x7e\x9c\x5e\x1b\xc5\x44\x81\x3e\x7f\x1e\x88\x6e\xec\x12\x94\x00\x03\xfa\x36\xa5\x21\x26\x7d\x1d\x71\x9b\xea\x2d\xb9\x4d\x09\xb7\x6e\x89\xdb\xd4\xdb\x75\x54\xec\x4b\xc6\xa6\x17\x77\xc6\x39\x9c\x07\x6b\x5f\x4b\x6d\x9c\xf7\xf7\xed\x6c\xdc\x78\xcc\xcc\xbe\xda\xb8\xf9\xff\x45\x9f\xdf\xd4\x37\xee\x7d\x00\x9b\x3c\x3f\xb9\x4d\x4f\x0f\xfb\xec\xc8\x42\x5f\x39\xbd\x46\x2a\x8e\x53\xa1\x5b\xaa\x2f\x99\xa0\x53\x4a\x15\x68\x9d\xa3\x71\xea\xff\xe4\x2f\xc6\x4f\x4f\xe3\xdc\x1b\x30\x1f\xa5\xda\xe4\xc8\x90\xea\xc9\x08\x0c\xa1\xfd\xe8\x44\x70\x8e\xc2\x65\x48\xdd\x64\x1b\x08\x5a\x9a\xf7\xa6\xbd\x70\x84\x34\x34\x3f\x80\x70\x54\x47\xc8\x2a\xee\xaf\x6d\x82\xd6\xc6\x54\x3a\xf7\xae\x39\xe0\x90\xfc\xe4\xf4\xf9\x4b\x6f\xdd\xb5\x91\x0a\x17\xd0\x6e\xb0\x3d\xf6\x38\x15\x02\x4c\xde\x99\x48\x99\x3c\x04\xec\x67\x40\x77\x8a\xd7\xee\x14\x07\x6a\xba\xa9\xec\x00\xac\xab\xc4\x27\xbf\xd6\xb2\x95\x54\x25\x36\x39\x9a\x5d\x4d\x7f\xbe\xf8\x6d\xbe\xb8\x78\x35\xfb\x35\x0b\x1f\x37\xff\x9c\x5f\x24\x3f\xdd\x13\x59\x56\x52\x80\x30\x9f\xf3\x9f\xee\xb7\x41\x93\xab\x58\x38\x36\xa0\x4d\x5d\x0c\xb0\x61\x66\x71\xca\x99\x08\x77\x61\x01\x05\xd3\x46\xed\xea\xc3\xca\x11\x95\x64\x03\x2a\x51\x71\xa2\x66\x92\x23\x52\xfe\x74\x3c\x1e\x8f\x42\xbe\x0a\x87\x1c\x53\x95\x3b\x1b\x0e\x66\xdf\xf5\x04\x27\x4b\x2b\x28\x87\x63\x5e\x8f\x92\x5f\x76\xfc\x00\x14\x7c\x5f\x49\x65\x72\x34\x19\x9f\x3c\x1d\x8f\x5a\xdf\x74\xcd\x72\x46\xe0\x8a\xb9\x78\x0b\x6a\xaa\x0a\x5b\x82\xa8\xeb\x4d\x65\x85\x61\x25\x24\xa4\x53\x96\x26\x0e\xad\x33\x0d\xc6\x30\x51\xe8\x74\xf3\xc2\xb9\x3e\xdb\x4e\x30\xaf\xd6\x78\xf2\x97\x26\x6b\xeb\xe0\xbb\x64\x89\xc9\x06\x04\xad\xa5\x1d\xbf\x4e\x7b\x80\x12\x28\xc3\x89\xd9\x55\xd0\xae\x50\x71\x46\x7c\xfd\x93\x6d\x05\x4d\x3b\x2c\xab\x94\x34\x72\x69\x57\x31\x4b\x4a\x4b\x5d\x06\xdc\xb2\x26\xb5\x26\xe8\x01\xfe\x64\x15\x3c\xe8\x20\xfa\xf6\x3f\xc8\xc0\x90\x4c\xfa\x30\x9f\x79\x40\xad\x21\xf3\x92\xa9\x83\x3b\xf1\x36\x4b\x0c\xce\x25\x46\x0a\x9f\x51\x98\x28\x12\xe7\xb0\x64\xe5\x9c\x71\x68\x89\xe0\x9f\x2c\x5c\xea\x07\x07\x15\x6c\x60\xf7\x2d\xf2\x1b\xd8\x3d\xf8\x7f\x6c\xbc\x8c\xfc\xb0\xc2\x91\xa7\x19\x98\xcd\x73\x74\x7f\xff\xb5\x7c\xe6\x59\x47\x2f\xb6\xcc\x67\xff\x1b\x56\x82\xb4\x26\x47\xc2\x72\xfe\xf5\xda\x2c\x72\x39\xd6\x43\x5d\xba\xef\x13\xbe\x07\x0a\x74\xd7\x64\x0d\xd4\xf6\x1c\x56\x2f\xdc\x4c\x05\xda\x07\x4d\x07\x8e\xb9\xc1\xa5\xef\xb5\x2f\xa4\x63\xf9\xa0\xdf\x48\x0a\x73\xa9\xcc\x02\x8b\xc2\x95\xd3\x0f\x3b\x73\xd7\x76\x29\xc0\x9d\xd5\xf3\x93\xf4\xd4\x87\xff\x6c\xf2\xcc\xcd\xbb\x22\x9e\x38\xc9\x50\xc4\xb9\x66\x2c\x1e\xae\x37\xdb\xf3\x09\x62\xc6\xfd\xa5\x61\xf9\x59\xac\xfe\x84\x08\x25\xd4\xa0\xab\xc2\x84\x40\xe5\xa6\x0d\x08\x73\xb3\xab\x9c\xe2\x6f\xb8\x32\x7f\xee\x62\xe2\xe6\x10\x5a\x5a\xe5\xa2\xe0\x93\xf1\x78\x14\x7b\x99\x5a\xeb\x37\x29\xf5\x42\x1f\x2a\x9d\xa3\x13\xaf\x61\x7f\x33\xee\xb7\x18\x6a\xc2\xa1\x35\xc1\xfe\x52\xca\xca\x45\x87\x3f\x60\xbb\xcf\x7e\xf7\x76\x4f\xbd\x86\xbd\xbd\x74\x77\x3b\xac\x74\xbd\xc2\x70\x25\x23\x0b\xe6\x76\xc9\x19\x79\xb7\xb8\xcc
\x7b\xb9\xf9\x68\x0d\x96\x77\x32\xb7\xe3\xa2\xbb\x6e\x22\x14\x17\x6d\x34\x8f\xc1\x25\x16\x1d\x67\xb3\xf3\x85\xcb\x00\xe9\xe4\xe4\x45\x20\xe6\x93\x3d\x4c\x2c\x0f\x08\xa3\x6a\x1f\x8a\x90\x2b\x70\x03\xc3\x2f\x41\x14\x66\x9d\xa3\x97\x1d\x4f\xcf\xe6\x9d\x95\xa2\xa6\x58\x03\x65\xee\x88\x0e\x4b\x47\xab\xe7\xfe\xd9\x23\x34\x06\x0a\xe8\x1a\x9b\xb6\xd2\x4a\xe4\x56\x27\xda\x4b\xb6\x57\xad\xbf\xab\xde\x75\x93\xfd\xb6\x0b\x6b\x0d\xe6\x07\x1c\x70\x46\xa4\xd0\x92\x43\x36\x72\x3d\x13\xf6\x44\x6d\x82\x6a\x09\x66\x2d\x69\x8e\xb0\x35\xae\x70\x61\x14\x84\x61\x66\x37\x8f\x71\x55\xe7\xa3\xfb\xfb\x04\xb1\x15\x4a\x2f\x04\x5e\x72\x98\x4e\xcf\xa7\xd6\xac\x1d\x2a\x10\xcd\xc7\xcb\x24\xf6\xe2\x53\x17\x85\xd1\xf4\x3c\x50\x73\x8d\x39\x07\x1f\x6b\xda\xf7\x0a\x2e\x0b\x26\x3a\xad\x71\x89\xab\x8a\x89\xe2\x2a\x9a\x41\x38\x66\xa5\x9f\xe8\xe7\x86\x23\x8f\x11\xa1\x48\x79\x5b\x81\x98\x9d\xcf\x06\xa6\xd7\x8d\x56\x08\xd5\xe7\x3e\xf2\xa7\xde\xc0\xb0\xff\x74\x3a\x3d\x8f\x71\xfc\x3c\x44\xfd\x16\x7e\x0d\x44\xb9\x70\x78\x54\x24\x00\xba\x62\x98\x95\x9d\x47\x06\x46\xbb\xef\x22\xda\x2e\x9b\xaf\x4a\xc1\x0a\x94\x02\xfa\x2e\xf6\xa5\x5d\xa0\x15\xec\x83\x85\xdf\xdc\x70\x33\x3a\xc4\xf4\x26\xa1\xc4\x8c\x77\x67\xfd\x40\xfc\xae\xeb\xe6\x78\x80\xd6\xac\xa5\x62\x9f\xa0\x65\x92\x77\x46\x5a\x32\xa2\xa4\x96\x2b\x23\x05\x67\xc2\x25\xd1\x32\x1b\x6e\xfc\x06\x04\x8e\x07\x95\x79\x9a\x9e\x64\x8d\xbe\x66\x05\x23\x37\x20\x7e\x90\x76\xaf\xcb\x73\x0f\x04\xed\x71\xec\xd2\x35\x37\xa8\xc2\x5a\x7f\x94\x8a\x0e\x99\xd6\x10\xeb\xc7\x12\x6d\x75\x2c\xdb\xae\x8d\xb7\x84\xf6\x08\xf9\xfa\x66\xee\x07\xe7\xd1\xc8\x03\xd4\x8c\x49\x74\xba\x5f\x3c\xff\xb8\xc0\x5a\xeb\xfa\xbd\x5a\x34\x0c\xde\x7a\x9b\xa1\x2b\x7c\x37\x2d\xe0\xda\xe5\x04\xea\x52\x4a\x9d\x95\xe2\x74\x08\x8b\x5a\x8b\xee\x60\xb8\x3a\xfa\x78\xfd\x12\x60\x89\x0e\xb8\x74\x87\x4b\x47\x68\x4f\x88\xae\x09\x2e\xab\x6a\x7d\xe3\x86\x07\x66\xbc\x78\x56\x17\x03\x0d\x47\x0f\xc1\x9e\x8e\xc7\xa3\x0a\x5b\xed\x58\xd8\x3e\xa6\x84\x50\x55\x0d\x1a\xa9\xa5\x94\x46\x1b\x85\xab\xd0\x61\x1d\x35\x3e\xc8\xd5\x95\x57\x93\x09\x66\x62\xa5\xb0\x36\xca\x12\x63\x55\x28\xa5\x2a\x4c\x7a\x6f\x49\xcc\x41\xba\x32\xd7\x6b\xac\x80\x36\x0f\x98\x87\x84\x46\x95\x92\xef\x81\x74\x02\x7a\x6c\xe3\x5c\xc1\x76\xed\xdf\xaf\xa4\xca\x91\x90\x14\x12\x25\x39\xa4\xbd\x6e\x37\x73\x8d\xa5\x35\x50\x77\x3c\x51\xd9\x22\xbc\xf6\x5d\x81\xd6\xb8\xa9\x13\xfb\x73\x37\x50\x56\xae\xfb\x6c\x8a\x48\x62\x15\x33\xbb\x29\xe7\x92\x60\xb7\x64\xb8\x71\x44\x37\x23\xb1\xe6\xd4\xe3\x3c\x3b\xa9\x27\x2f\xf1\x12\xb8\x9e\x83\x9a\x07\xe5\x39\x7a\x1a\x1e\xec\x18\x1d\xca\x4d\xc6\xf5\x4f\x32\x79\x59\xff\x64\x7e\x74\xa4\xa4\x75\x8d\x5c\x7b\x06\xda\x2e\xa9\x2c\xb1\xbb\xfd\x37\x17\x57\xf3\xc5\xdb\x77\x37\x17\x8b\xd9\x3c\x15\xac\x72\xdd\x79\xcc\xc3\x53\x42\x5c\x7b\xd0\x8a\xf9\xff\x24\x08\xe4\x5c\xb8\x10\x0d\x82\x80\x6e\x53\x57\x89\x05\x2e\x80\x36\x6f\x99\x49\x7d\xd6\xfe\x77\xff\x56\xec\x2f\xb6\x1b\xaf\xb8\xdc\x7d\xe5\x96\x57\x8a\x6d\xb1\x81\x5f\x06\x6f\x7c\x38\x58\xe5\xea\x35\x3f\x5f\x77\xc1\x3e\x1a\x44\x70\x5c\x7e\x4f\xc2\x63\xbc\x80\x0e\x6f\x8f\x75\x7f\x72\xf0\x75\xe7\xe0\x4d\xdf\x7f\xeb\x39\xd0\xdb\x68\xdf\x69\xd7\xad\x7e\xf3\x10\xdc\xbc\xf9\x0c\xdb\x9c\x88\x0f\x3b\x29\xf1\x5d\x24\x91\x9e\x89\x57\x9c\x15\x6b\x13\x6e\x62\xf3\xcc\x1c\x1b\xae\x7e\x50\xd9\x4a\x6e\xcb\xce\xab\x09\xdd\x09\x5c\x32\xe2\x03\xaa\x8b\x16\x4c\x14\xa1\x3e\xa1\x31\xe4\xff\x37\x00\x00\xff\xff\x51\xa6\x87\xe7\xa2\x1a\x00\x00") +var _masterEtcOriginMasterMasterConfigYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x6b\x6f\x1b\xbb\xd1\xfe\xae\x5f\x41\x04\x07\xc8\xc9\x8b\x77\x77\x25\x3b\xd7\x05\x8a\x42\xb5\x9d\x13\xe1\xd8\xb1\x2a\x3b\x45\x81\xba\x08\x28\x72\xb4\x62\xc4\x25\x37\xbc\x28\x56\xdc\xfc\xf7\x82\x97\xbd\x4a\x4a\xd2\x9c\xa0\x75\x02\xc4\x4b\x3e\x33\x1c\x72\x1e\xce\x85\xc1\xb4\x64\x5a\x33\x29\xce\xa4\x58\xb1\x22\x1f\x21\x54\x71\x5b\xb0\xce\x37\x42\x7f\xb1\x8c\xd3\x73\x58\x61\xcb\x8d\x0e\x43\x08\x11\x0f\xb0\x0a\x1b\x26\x45\x3d\x88\x10\xae\xd8\xdf\x40\x39\x8d\x39\xda\x4e\x9a\x61\x10\xdb\x1c\xfd\xe3\x9f\xcd\xf7\x86\x09\x9a\xf7\x15\x87\x15\x1b\x84\x02\x2d\xad\x22\xa0\x5b\xdd\x08\x71\x56\x32\xa3\x73\xf4\xf0\xa5\x33\xa8\xe0\xa3\x05\xdd\x19\xf6\x6a\xaf\xb7\xa0\x14\xa3\xf0\x83\x06\x77\x0c\x6c\x34\x75\x2c\x9c\x4b\x3a\x57\xa0\xc1\xfc\x98\x76\xca\x34\x5e\x72\xc8\xd1\x0a\x73\x0d\x83\x45\xe3\x81\x4c\xfb\xae\xf1\x20\x59\x81\xd0\x6b\xb6\x32\x29\x93\xd9\xac\xc4\x05\xcc\x25\x67\x64\xf7\x83\x4e\xb9\x07\x62\x1d\x72\x61\x79\xf7\x9c\x13\x54\x62\x43\xd6\x5e\xff\x54\x08\x69\xbc\xba\x9e\x23\x12\xb4\x81\x5d\x8e\x98\x83\xe8\xb4\x67\x16\x05\xb1\x4b\x1a\xd5\x1d\x19\x84\xb6\x98\x5b\xc8\xd1\x63\xa3\x2c\x3c\xee\xcc\x08\x5c\x42\xde\x9a\x93\x50\x10\x0c\x68\x07\x20\xc5\xe2\x10\x1d\x92\x86\x25\x39\xaa\x24\xd5\x47\xa6\x96\xce\x8b\xba\xc7\x98\x0f\x40\x4c\x8e\x9c\x1d\x9d\x61\xbd\x61\xd5\xb5\x5f\x89\x7b\x3b\x5e\x63\xc6\xad\x82\x01\x2e\x38\xa9\x73\xf8\xd1\x3f\xb8\x28\x14\x14\xd8\x48\xd5\xb9\x4b\x4a\xde\xef\xce\x38\x03\x61\x66\x62\x25\x83\xed\x04\x94\x79\xcd\x9c\xf7\x5b\x91\x64\xa5\xa4\x30\x89\xc7\xa7\x44\x19\x0f\xdc\xc0\xee\xab\xb8\x0d\xec\x46\xb8\x62\x97\xb0\x05\xae\xf3\x51\xe2\x7c\x3b\x70\x35\xb6\x66\xdd\x9a\x13\x6f\xca\x1b\xc0\x14\x54\x34\xc6\x1b\x77\x36\xcd\x51\x47\x73\x42\x70\x63\x44\x04\xc8\xb2\x94\xe2\x2d\x2e\x6b\x07\x24\x47\x8c\x1a\x05\x62\x19\x85\xc3\x2a\x73\x05\x2b\x76\xdf\x4a\xfd\x3d\x59\x40\x29\x0d\x24\x17\x0e\x93\xf8\xd1\x42\x49\x5b\x05\xf8\x3e\xee\x37\x37\xe9\x07\xad\x06\xe5\x98\x72\x0c\xf9\x4e\x83\x1a\x11\x29\x8c\x92\x9c\x43\xc7\x0b\xc0\x81\xb4\x17\x82\x4b\xb2\x79\xeb\x09\xd7\xd0\x36\x29\xb1\x36\xa0\x92\x56\xd8\xb1\x45\x83\xda\x32\x02\x37\xee\x1f\x51\x9c\x81\x8a\x97\x5d\xb3\x42\xd4\xc7\xd7\xf5\x66\xc4\x27\x61\xbe\x39\xc0\x8e\x1f\x07\x08\xe7\xbf\xce\x92\x39\x7a\xfc\x7f\x8f\x47\x44\x2a\x3d\xe5\x5c\x7e\x02\x7a\xad\x58\xc1\x84\xf7\xec\xaf\x7f\x66\x4f\xb2\x6c\x72\xf2\xe2\x2e\x1d\xfb\xbf\x93\x5f\xf3\x7f\xdd\x7d\x7e\xd2\x4c\x71\x49\x30\x5f\x4b\x6d\x06\xe3\x0f\x0f\xe8\xaf\x56\x1a\xb8\x02\x83\xd1\xaf\x4c\x50\xb8\x47\xe9\x95\xdf\x6e\x3a\x9b\x6b\x34\x7e\x92\xde\x18\xc5\x44\x81\xbe\x7c\x19\x88\x6e\xec\x12\x94\x00\x03\xfa\x2e\xa5\x21\x26\x7d\x1b\x71\x97\xea\x2d\xb9\x4b\x09\xb7\x6e\x89\xbb\xd4\xdb\x75\x54\xec\x6b\xc6\xa6\x17\xf7\xc6\x39\x9c\x07\x6b\xdf\x48\x6d\x9c\xf7\xf7\xed\x6c\xdc\x78\xcc\xcc\xbe\xda\xb8\xf9\xff\x44\x9f\xdf\xd4\x77\xee\x7d\x00\x9b\xbc\x38\xb9\x4b\x4f\x0f\xfb\xec\xc8\x42\xdf\x38\xbd\x46\x2a\x8e\x53\xa1\x5b\xaa\x2f\x99\xa0\x53\x4a\x15\x68\x9d\xa3\x71\xea\xff\xe4\x2f\xc7\xcf\x4e\xe3\xdc\x5b\x30\x9f\xa4\xda\xe4\xc8\x90\xea\xe9\x08\x0c\xa1\xfd\xe8\x44\x70\x8e\xc2\x65\x48\xdd\x64\x1b\x08\x5a\x9a\xf7\xa6\xbd\x70\x84\x34\x34\x3f\x80\x70\x54\x47\xc8\x2a\xee\xaf\x6d\x82\xd6\xc6\x54\x3a\xf7\xae\x39\xe0\x90\xfc\xe4\xf4\xc5\x2b\x6f\xdd\x8d\x91\x0a\x17\xd0\x6e\xb0\x3d\xf6\x38\x15\x02\x4c\xde\x99\x48\x99\x3c\x04\xec\x67\x40\x77\x8a\x37\xee\x14\x07\x6a\xba\xa9\xec\x00\xac\xab\xc4\x27\xbf\xd6\xb2\x95\x54\x25\x36\x39\xba\x9a\xde\xdc\x5e\x2c\xde\x5f\x2f\x2e\x7e\x7b\xff\x6e\x71\x99\xfc\xf2\x40\x64\x59\x49\x01\xc2\x7c\xc9\x7f\x79\xd8\x06\x0d\xae\x52\xe1\xd8\x80\x36\x75\x11\xc0\x8
6\x19\xc5\x29\x65\x22\xdc\x81\x05\x14\x4c\x1b\xb5\xab\x0f\x29\x47\x54\x92\x0d\xa8\x44\xc5\x89\x9a\x41\x8e\x40\xf9\xb3\xf1\x78\x3c\x0a\x79\x2a\x1c\x6e\x4c\x51\xee\x4c\x38\x98\x7d\x97\x13\x9c\x2c\xad\xa0\x1c\x8e\x79\x3b\x4a\x7e\xdd\xe1\x03\x50\xf0\x79\x25\x95\xc9\xd1\x64\x7c\xf2\x6c\x3c\x6a\x7d\xd2\x35\xcb\x19\x81\x2b\xe6\xe2\x2c\xa8\xa9\x2a\x6c\x09\xa2\xae\x33\x95\x15\x86\x95\x90\x90\x4e\x39\x9a\x38\xb4\xce\x34\x18\xc3\x44\xa1\xd3\xcd\x4b\xe7\xf2\x6c\x3b\xc1\xbc\x5a\xe3\xc9\x9f\x9a\x6c\xad\x83\xcf\x92\x25\x26\x1b\x10\xb4\x96\x76\xbc\x3a\xed\x01\x4a\xa0\x0c\x27\x66\x57\x41\xbb\x42\xc5\x19\xf1\x75\x4f\xb6\x15\x34\xed\xb0\xab\x52\xd2\xc8\xa5\x5d\xc5\xec\x28\x2d\x75\x99\x6f\xcb\x9a\x94\x9a\xa0\x47\xf8\xb3\x55\xf0\xa8\x83\xe8\xdb\xff\x28\x03\x43\x32\xe9\xc3\x7b\xe6\x01\xb5\x86\xcc\x4b\xa6\x0e\xee\xc4\xdb\xec\x30\x38\x97\x18\x21\x7c\x26\x61\xa2\x48\x9c\xc3\x92\x95\x73\xc6\xa1\x25\x82\x7f\xb2\x70\x99\x1f\x1d\x54\xb0\x81\xdd\xf7\xc8\x6f\x60\xf7\xe8\xbf\xb1\xf1\x32\xf2\xc3\x0a\x47\x9e\x66\x60\x36\xcf\xd1\xc3\xc3\xb7\xf2\x98\x67\x1d\xbd\xd8\x32\x9f\xf5\x6f\x59\x09\xd2\x9a\x1c\x09\xcb\xf9\xb7\x6b\xb2\xc8\xe5\x58\x07\x75\xe9\xbe\x4f\xf8\x1e\x28\xd0\x5d\x93\x35\x50\xdb\x73\x58\xbd\x70\x33\x15\x68\x1f\x34\x1d\x38\xe6\x06\x97\x7e\xd0\xbe\x80\x8e\x65\x83\x7e\x2b\x29\xcc\xa5\x32\x0b\x2c\x0a\x57\x46\x3f\xee\xcc\xdd\xd8\xa5\x00\x77\x56\x2f\x4e\xd2\x53\x1f\xf6\xb3\xc9\x73\x37\xef\x8a\x77\xe2\x24\x43\xf1\xe6\x9a\xb0\x78\xb8\xde\x6c\xcf\x27\x88\x99\xf6\xf7\x86\xe5\x67\xb1\xea\x13\x22\x94\x4e\x83\x6e\x0a\x13\x02\x95\x9b\x36\x20\xcc\xed\xae\x72\x8a\xbf\xe3\xca\xfc\x7f\x17\x13\x37\x87\xd0\xd2\x2a\x17\x05\x9f\x8e\xc7\xa3\xd8\xc3\xd4\x5a\xbf\x4b\xa9\x17\xfa\x58\xe9\x1c\x9d\x78\x0d\xfb\x9b\x71\xbf\xc5\x50\x13\x0e\xad\x09\xf2\x97\x52\x56\x2e\x3a\xfc\x0f\xb6\xfb\xfc\x0f\x6f\xf7\xd4\x6b\xd8\xdb\x4b\x77\xb7\xc3\x0a\xd7\x2b\x0c\x57\x32\xb2\x60\x6e\x97\x9c\x91\x77\x8b\xcb\xbc\x97\x93\x8f\xd6\x5e\x79\x27\x63\x3b\x2e\xba\xeb\x26\x42\x51\xd1\x46\xf3\x18\x5c\x62\xb1\x71\x36\x3b\x5f\xb8\x0c\x90\x4e\x4e\x5e\x06\x62\x3e\xdd\xc3\xc4\xb2\x80\x30\xaa\xf6\xa1\x08\xb9\xc2\x36\x30\xfc\x12\x44\x61\xd6\x39\x7a\xd5\xf1\xf4\x6c\xde\x59\x29\x6a\x8a\xb5\x4f\xe6\x8e\xe8\xb0\x74\xb4\x7a\xee\x9f\x3b\x42\x43\xa0\x80\xae\xb1\x69\x2b\xac\x44\x6e\x75\xa2\xbd\x64\x7b\xd5\xfa\xbb\xea\x5d\x37\xd9\x6f\xb7\xb0\xd6\x60\x7e\xc2\x01\x67\x44\x0a\x2d\x39\x64\x23\xd7\x2b\x61\x4f\xd4\x26\xa8\x96\x60\xd6\x92\xe6\x08\x5b\xe3\x0a\x16\x46\x41\x18\x66\x76\xf3\x18\x57\x75\x3e\x7a\x78\x48\x10\x5b\xa1\xf4\x42\xe0\x25\x87\xe9\xf4\x7c\x6a\xcd\xda\xa1\x02\xd1\x7c\xbc\x4c\x62\x0f\x3e\x75\x51\x18\x4d\xcf\x03\x35\xd7\x98\x73\xf0\xb1\xa6\x7d\xa7\xe0\xb2\x60\xa2\xd3\x12\x97\xb8\xaa\x98\x28\xae\xa2\x19\x84\x63\x56\xfa\x89\x7e\x6e\x38\xf2\x08\x11\x8a\x94\xeb\x0a\xc4\xec\x7c\x36\x30\xbd\x6e\xb0\x42\xa8\x3e\xf7\x91\x3f\xf5\x06\x86\xfd\xa7\xd3\xe9\x79\x8c\xe3\xe7\x21\xea\xb7\xf0\x1b\x20\xca\x85\xc3\xa3\x22\x01\xd0\x15\xc3\xac\xec\x3c\x2e\x30\xda\x7d\x0f\xd1\x76\xd9\x7c\x55\x0a\x56\xa0\x14\xd0\x77\xb1\x1f\xed\x02\xad\x60\x1f\x2d\xbc\x77\xc3\xcd\xe8\x10\xd3\x9b\x84\x12\x33\xde\x9d\xf5\x03\xf1\xbb\xae\x97\xe3\x01\x5a\xb3\x96\x8a\x7d\x86\x96\x49\xde\x19\x69\xc9\x88\x92\x5a\xae\x8c\x14\x9c\x09\x97\x44\xcb\x6c\xb8\xf1\x5b\x10\x38\x1e\x54\xe6\x69\x7a\x92\x35\xfa\x9a\x15\x8c\xdc\x80\xf8\x49\xda\xbd\x2e\xcf\x3d\x10\xb4\xc7\xb1\x4b\xd7\xd4\xa0\x0a\x6b\xfd\x49\x2a\x3a\x64\x5a\x43\xac\x9f\x4b\xb4\xd5\xb1\x6c\xbb\x36\xde\x12\xda\x23\xe4\x9b\xdb\xb9\x1f\x9c\x47\x23\x0f\x50\x33\x26\xd1\xe9\x7e\xf1\xfc\xf3\x02\x6b\xad\xeb\x8f\x6a\xd1\x30\x78\xe3\x6d\x86\xae\xf0\xfd\xb4\x80\x1b\x97\x13\xa8\x4b\x29\x
75\x56\x8a\xd3\x21\x2c\x6a\x2d\xba\x83\xe1\xea\xe8\xe3\xf5\x4b\x80\x25\x3a\xe0\xd2\x1d\x2e\x1d\xa1\x3d\x21\xba\x26\xb8\xac\xaa\xf5\xad\x1b\x1e\x98\xf1\xf2\x79\x5d\x0c\x34\x1c\x3d\x04\x7b\x36\x1e\x8f\x2a\x6c\xb5\x63\x61\xfb\x88\x12\x42\x55\x35\x68\xa4\x96\x52\x1a\x6d\x14\xae\x42\x87\x75\xd4\xf8\x20\x57\x57\x5e\x4d\x26\x98\x89\x95\xc2\xda\x28\x4b\x8c\x55\xa1\x94\xaa\x30\xe9\xbd\x21\x31\x07\xe9\xca\xdc\xac\xb1\x02\xda\x3c\x5c\x1e\x12\x1a\x55\x4a\x7e\x00\xd2\x09\xe8\xb1\x8d\x73\x05\xdb\x8d\x7f\xb7\x92\x2a\x47\x42\x52\x48\x94\xe4\x90\xf6\xba\xdc\xcc\x35\x96\xd6\x40\xdd\xf1\x44\x65\x8b\xf0\xca\x77\x05\x5a\xe3\xa6\x4e\xec\xcf\xdd\x42\x59\xb9\xee\xb3\x29\x22\x89\x55\xcc\xec\xa6\x9c\x4b\x82\xdd\x92\xe1\xc6\x11\xdd\x8c\xc4\x9a\x53\x8f\xf3\xec\xa4\x9e\xbc\xc4\x4b\xe0\x7a\x0e\x6a\x1e\x94\xe7\xe8\x59\x78\xa8\x63\x74\x28\x37\x19\xd7\x3f\xc9\xe4\x55\xfd\x93\xf9\xd1\x91\x92\xd6\x35\x72\xed\x19\x68\xbb\xa4\xb2\xc4\xee\xf6\xdf\x5e\x5c\xcd\x17\xd7\xef\x6e\x2f\x16\xb3\x79\x2a\x58\xe5\xba\xf2\x98\x87\xa7\x84\xb8\xf6\xa0\x15\xf3\xff\x39\x10\xc8\xb9\x70\x21\x1a\x04\x01\xdd\xa6\xae\x12\x0b\x5c\x00\x6d\xde\x30\x93\xfa\xac\xfd\xef\xfe\x8d\xd8\x5f\x6c\x37\x5e\x71\xb9\xfb\xc6\x2d\xaf\x14\xdb\x62\x03\xbf\x0f\xde\xf6\x70\xb0\xca\xd5\x6b\x7e\xbe\xee\x82\x7d\x34\x88\xe0\xb8\xfc\x9e\x84\xc7\x78\x01\x1d\xde\x1c\xeb\xfe\xe4\xe0\xab\xce\xc1\x9b\xbe\xff\xc6\x73\xa0\xb7\xd1\xbe\xd3\xae\x5b\xfd\xe6\x01\xb8\x79\xeb\x19\xb6\x39\x11\x1f\x76\x52\xe2\xfb\x48\x22\x3d\x13\xaf\x39\x2b\xd6\x26\xdc\xc4\xe6\x79\x39\x36\x5c\xfd\xa0\xb2\x95\xdc\x96\x9d\xd7\x12\xba\x13\xb8\x64\xc4\x07\x54\x17\x2d\x98\x28\x42\x7d\x42\x63\xc8\xff\x77\x00\x00\x00\xff\xff\x95\x6c\x5f\xfe\x9a\x1a\x00\x00") func masterEtcOriginMasterMasterConfigYamlBytes() ([]byte, error) { return bindataRead( @@ -198,7 +198,7 @@ func masterTmpAnsibleAnsibleSh() (*asset, error) { return a, nil } -var _masterTmpAnsibleAzureLocalMasterInventoryYml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x6d\x6f\xe3\x36\x12\xfe\xee\x5f\x31\x50\x16\xf0\x1d\xb0\xb6\xb1\xbb\x05\xee\x2a\x60\x3f\xb8\xa9\xb6\x09\xda\xc4\x86\xed\x3d\xec\x5e\x51\xa8\x14\x35\xb2\x78\xa1\x38\x2a\x49\xd9\x71\x0d\xff\xf7\x03\x45\xca\x71\x94\x78\x93\x36\x9f\x22\x73\x5e\x1e\xce\x3c\xf3\xc2\xd1\x68\x34\xb8\x80\x4f\xd7\x5f\x6e\x92\x18\xa6\x6a\x67\x4b\xa1\xd6\x90\xa1\xa4\x2d\x08\x05\x4c\xca\x11\x67\xb5\x81\x92\x19\x10\x76\x68\x60\xc3\x64\x83\xa0\xb1\x96\x8c\x63\x0e\xd9\x0e\x0c\xe6\x4e\xd4\x96\x38\xb8\x80\xf0\x47\x35\x2a\x53\x8a\xc2\x56\xcc\x58\xd4\x86\x6b\x51\xdb\xb1\x29\x61\x5b\x0a\x5e\x82\x30\xa0\xc8\x82\xc8\x91\xc9\xb7\xb0\x45\x30\x25\x35\x32\x87\x42\xdc\x83\x2d\x99\x1d\x0f\x24\x71\x26\xbd\x72\x3c\x00\x28\xc9\x58\xe3\xfe\x01\x68\x4f\xdc\xf7\x00\x60\xc3\x74\xf8\x95\x29\x23\x32\x89\x29\x27\xa5\x90\x5b\x41\x2a\xf6\x92\x8f\x4e\xeb\x9d\x2d\x49\xa5\x42\x59\xd4\xb5\x46\x67\x1c\x26\x8d\xd1\x93\x4c\xa8\x89\x3f\x7c\x3f\x18\x3c\xba\x40\x2a\x8c\x33\x6a\x99\x50\xa8\xc5\x9f\x98\xc7\xf0\x89\x49\x83\x3d\x29\x2e\x05\x2a\x9b\x66\x42\x31\xbd\x3b\x31\x4a\xfc\x89\xbd\x8a\xad\x31\xb5\x6c\x1d\x43\xb4\xf9\x4f\xb2\x58\x5e\xcf\x6e\xa3\xbe\xd0\x16\x33\xe7\xd5\x90\xc4\xa0\xa0\x58\x85\x31\x44\xd7\x37\xd3\x9f\x92\x74\xbe\x48\x3e\x5d\x7f\x99\xf8\x8f\xd5\xd7\x79\x32\xda\x62\x36\x0a\x0a\x71\xdf\xaa\xc6\x75\xda\x68\x19\xc3\xf0\x9c\xf6\x9b\x3d\xa7\xaa\x26\x85\xca\x1e\xe2\x37\xfb\x0d\x6a\x23\x48\x1d\x86\x3d\x54\x3e\x1f\x69\x8e\x05\x6b\xa4\x4d\x4d\x93\xe5\x54\x31\xa1\x62\x18\xae\x92\x9b\xf9\x62\xf6\x79\x95\x2c\xae\xe7\x63\x25\xea\xb1\xa0\xa1\x77\x1f\xd8\x15\xfe\x5f\x95\xc2\x74\xd9\xce\x10\xec\xae\xc6\x8f\x42\x15\x9a\xbd\x85\xac\xb1\x8e\x0a\x25\xdb\x20\x58\x02\x29\x36\x08\x5b\x61\x4b\xd0\xb8\x16\xa4\xbc\x18\x14\xa4\x41\xd1\x36\x98\xcb\x90\xb3\xc6\x20\x50\x01\x12\xd7\x8c\xef\x40\x23\x33\xa4\x4c\x0f\xb9\xa6\xc6\x21\x37\x28\x91\x5b\xd2\x31\x0c\x4f\x8d\xf6\xef\xe9\xce\x8c\xd5\xbb\x6f\xcb\x5f\xc0\x2d\xb5\x14\x86\x6d\xb9\x03\xeb\x2e\x26\x0c\x30\xc8\x45\x51\xa0\x46\x65\x21\x67\x96\xb5\x57\xf4\x97\x13\x16\x44\x1f\x58\xad\xa9\x42\x5b\x62\x63\x52\x45\x39\x9e\x78\xdc\x07\x97\xc3\x18\x86\xde\xeb\xa1\x0b\xe8\x12\x2d\x4c\xff\x6c\x34\x82\xa9\x91\x8b\x42\x70\xef\xca\xc5\xc6\x55\x06\xe6\xd0\x5d\xa1\xe7\xce\x9f\x9e\x5c\xd0\x92\x76\xec\xaa\x35\x6d\x44\xee\xca\x21\x62\xce\x70\x9a\x49\xca\xa2\x57\x2a\xdf\x09\x95\xc7\x10\x51\xf6\x3f\xe4\xf6\xb5\x4a\x0f\x6e\x52\xc6\x39\x35\xca\x7a\x86\x0f\x17\xc9\x4f\xd7\xcb\xd5\xe2\x6b\xba\x5c\xcd\x16\x8e\xa1\xd3\xff\x7e\x5e\x24\xe9\xf4\xf2\x72\xf6\xf9\x76\x75\x3b\xbd\x49\xfa\xe9\x7a\xbd\x8b\x3b\xdc\xbd\xe8\xe1\xe7\xe4\xeb\xdf\x70\x70\x6c\x11\x31\x44\x9d\xdc\xdf\x08\x85\x46\x26\xab\x18\x22\x4e\x1a\xc7\x5b\xa1\x72\xda\x9a\xb1\x42\x1b\x3d\x53\x4b\xe1\x97\x2b\xa6\x73\xe0\x94\xa3\xa7\x60\xa8\x8f\xf1\x23\x99\xcb\x46\x3b\x42\x4a\x47\x53\x04\x2e\x1b\x57\xc7\x60\x2c\xb3\x08\xcc\x42\x8e\xb5\xa4\x5d\xe5\x28\x6b\x45\x85\x90\x13\xfa\x06\xed\x6b\xb1\x44\x70\xdc\x34\xc1\x98\xc7\x8f\x1a\x73\x57\xa6\xee\xd4\xf7\x05\x5f\xac\x92\x65\x28\x0d\xb0\xba\x96\x02\x73\x60\xca\x71\x91\xe5\x3b\x27\x9b\x21\xfc\xd1\xa0\x16\x98\x07\x53\x6c\xcd\x84\x32\xd6\x61\x70\x76\x6a\x12\xca\xb6\xb3\xc4\xa1\xf0\x33\x25\x80\x6b\xc7\x82\x17\x92\x6c\x97\x11\xdd\x19\xd0\x8d\x1a\xc3\x54\x1a\x7a\x1b\xcc\xb9\x63\x5f\xe9\xed\x80\x12\x9c\x41\x9b\x76\x08\x1d\x0b\xa2\x82\x49\x69\x20\x63\xfc\x2e\x72\x80\xde\x39\x9c\x9a\x6a\x2d\x98\x45\xb9\x83\x6d\x89\x1a\x81\x99\x53\x7b\x21\x5b\x47\x8b\x92\xd6\xae\xde\x42\x88\xc6\xb0\x6a\x75\xb6\xcc\x00\x93\x86\x20\x17\x86\x37\xc6\xb5\x4f\x60\x19\xb9\xaa\x2f\x82\xb5\x76\x7c\x9e\xf8\x73\x00\x72\xf2\x59\x0b\x7d\xa
e\xf5\x60\xe0\xe3\x47\xf0\xcd\xae\x0d\xfb\xb1\xc9\x39\x03\xc1\x56\x8d\xba\x40\xee\x12\x5a\x20\x6b\x67\x9b\xb3\xd6\xa6\x8b\xa9\x13\xe5\x70\xff\x52\xac\x4b\xd4\x2e\x86\x5d\x4c\x8d\xd0\xc7\x2c\x74\x7e\x1f\x73\x66\x81\x1b\x61\x84\x05\xc9\x5c\x3c\xff\x51\x93\x71\x7e\x76\xae\xc1\x31\xac\x48\x19\xb4\x40\x1a\xde\x90\x2d\x51\xff\xf3\x0c\xd5\x7d\xdb\xed\x1c\xc4\xf0\xee\x85\x92\x38\x95\x3c\xdf\x29\x5b\xe2\xc6\x10\xd5\x1a\x0d\xaa\x27\x2d\xa7\xdf\x53\xf1\xbe\x26\xed\x70\xf8\x49\x1a\xa6\x5b\x0c\xc3\xf9\x62\x76\x93\xac\xae\x92\xcf\xcb\x34\xf9\x32\x9f\x2d\x56\xc9\x22\x0d\xb3\x73\xf8\xcd\x89\xac\x8c\x65\x52\xc6\xb0\xd2\x0d\x7e\x03\xa7\xf7\x57\x6b\x2c\xc4\x7d\x7f\x76\xf7\x41\x3f\xd4\x60\xea\x26\x46\x0c\xc3\xe9\xed\xf2\xfa\x87\x5f\x92\xf4\xc7\x64\xfe\xcb\xec\x6b\x3b\xa8\x03\xaa\x6e\x9d\x31\xa8\x37\x82\x63\x9a\x69\xba\x73\xd7\x7b\x04\xeb\x5b\x82\x0e\x57\x0c\xb0\xdf\xff\x1e\xed\xf7\x20\x53\x66\xb2\xe3\x54\x6f\x0f\x4d\x9a\x0b\x6e\x7f\x3d\x0b\xef\x37\x10\xc5\x79\xf0\x8e\xc0\x43\xd2\x62\x2d\xd4\x10\x50\x1a\x0c\x2e\x7c\x3c\x1a\x2d\xe1\x70\x88\x7e\x3f\x1c\x7a\x21\xe8\x50\x72\x66\x99\xa4\x75\x07\xf3\xec\xce\x13\xe4\x47\x41\xfe\x64\xef\x79\x76\x6d\x41\xcb\x73\xe7\xdc\xc4\xf0\x6b\x54\x5a\x5b\x9b\x78\x32\xb9\x9a\x2d\xdb\x99\x12\xbf\xff\xf0\xaf\xef\xa3\xdf\x7c\x7c\x2d\x56\xb5\xa3\xfd\x99\xb8\x9d\x05\xd4\xe9\x1d\x91\x79\xbd\xfe\x42\x76\x01\xb7\xb3\x55\x12\xfb\x5d\x48\x18\xd0\x98\x37\x2a\x67\xca\xfa\x35\x41\xe3\x1f\x8d\x08\xdd\xb5\x64\x2a\x97\x08\x81\xb2\x60\xee\x70\x0b\x19\xda\x2d\xa2\x0a\xa6\x7a\xfb\xf6\x28\xa4\xdc\x75\xcf\x59\x9b\x81\xb6\x05\xcf\x2e\xe7\xbd\xa0\xe0\x3d\xab\x6a\x89\x7e\xb9\x75\x89\x3b\x96\x45\xb4\x59\x5e\xcd\x16\x2b\x57\x09\x4f\xf6\x52\x4e\xfc\xae\x16\x5d\xc2\x51\x3f\x90\xfb\x72\x76\xf9\xf3\xfc\x7a\x75\x8e\xde\x4f\x14\x33\x66\x30\xec\xb4\x9d\xea\x0f\xd3\x65\xe2\x72\xf1\xa2\xee\x03\xd4\x4e\xf5\xf9\x00\xff\x48\xed\x14\xcb\xb1\x10\x0a\x5f\xec\x4e\x60\xe8\x61\xca\x58\x66\xee\xa0\x10\x12\xbb\x1e\xd9\xca\x8e\x77\x95\x04\xa1\x8c\xc8\xfd\x58\xec\x9b\x04\x4d\x12\xa1\xd0\x54\x3d\x93\x91\xad\x90\xb2\x9b\x7b\x8d\xa5\x9a\xea\xc6\x91\xc5\xad\x85\x8d\x71\xaf\xae\xe7\x2c\xf6\x37\xd7\x36\x9b\x6e\x88\xee\x84\x5a\x9f\x8c\x29\xd5\x54\x19\x6a\xb7\x07\x9f\x4c\x8e\x3e\xdd\xd0\x3d\x98\x04\xcb\x64\x3b\x54\xdc\x13\x8f\x69\xc7\x14\x8b\x5a\x31\xe9\xf8\xf6\x14\xb5\xf3\xb7\xc5\xa1\xc6\x47\x7c\xf3\xfb\x62\x00\x5d\x41\x3b\x02\x2d\x01\xdb\x90\xc8\x5b\x3c\x42\x71\xb7\x73\xb8\xcd\xc4\x58\x07\xab\x60\xdc\x42\x21\x54\xde\xe1\x3e\xba\xf2\x0f\x38\x00\x4e\x55\x45\xaa\xfb\x72\xdf\xaa\x10\xeb\x96\x28\x31\x4c\xd0\xf2\x89\x6f\x29\x93\xa3\xc4\x2b\x39\xec\x85\x1f\x1e\x93\xe1\x0a\x4d\x26\x05\x3f\xf6\xf4\xf6\x81\x74\x6c\x0b\xfb\x3d\x8c\x93\x7b\x1f\x97\x9b\x56\xf1\x8a\x4c\xbb\xa1\xc2\xe1\x10\xff\xfb\xbb\xef\x3e\x4c\x82\x62\xd4\x37\xc8\x6a\xf1\x97\x8d\x3d\x18\x79\x4d\x93\x82\x8b\xf0\x5e\xbf\x25\x90\xa4\xd6\xa8\x41\x21\xe6\x6e\xdb\x32\x2e\xd4\x4f\x92\x38\xfa\x30\xfe\x7e\xfc\xfe\xfd\xe8\x5d\x78\x63\x0d\x35\xb6\x85\x41\xca\x13\xbe\x6b\x2f\x3b\xb4\x01\x88\xa3\xcf\x43\xa8\xdc\x57\xd8\xce\x3b\x28\xc3\xc1\x80\x28\x74\x56\x93\x5a\x4a\x7d\xb6\xce\xbe\xd5\xff\x1f\x00\x00\xff\xff\xe7\x02\x45\x23\x68\x10\x00\x00") +var _masterTmpAnsibleAzureLocalMasterInventoryYml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x6d\x6f\xdb\x38\x12\xfe\x9e\x5f\x31\x70\x0b\xf8\x0e\xa8\x6d\xb4\x5d\xe0\x6e\x05\xf4\x83\x37\xab\x36\xc1\x36\x71\x60\x3b\x87\xf6\x16\x0b\x2d\x45\x8d\x2c\x5e\x68\x8e\x96\xa4\xec\x78\x83\xfc\xf7\xc3\x88\x94\xe3\x28\x71\x9b\xdd\x7c\x8a\xcc\x79\x79\x38\xf3\xcc\x0b\x47\xa3\xd1\xc9\x2b\xf8\x78\xfe\xe5\x22\x4d\x60\x6a\x76\xbe\x52\x66\x05\x39\x6a\xda\x82\x32\x20\xb4\x1e\x49\x51\x3b\xa8\x84\x03\xe5\x87\x0e\x36\x42\x37\x08\x16\x6b\x2d\x24\x16\x90\xef\xc0\x61\xc1\xa2\xbe\xc2\x93\x57\x10\xff\xa8\x46\xe3\x2a\x55\xfa\xb5\x70\x1e\xad\x93\x56\xd5\x7e\xec\x2a\xd8\x56\x4a\x56\xa0\x1c\x18\xf2\xa0\x0a\x14\xfa\x0d\x6c\x11\x5c\x45\x8d\x2e\xa0\x54\xb7\xe0\x2b\xe1\xc7\x27\x9a\xa4\xd0\x41\x39\x39\x01\xa8\xc8\x79\xc7\xff\x00\xb4\x27\xfc\x7d\x02\xb0\x11\x36\xfe\x2a\x8c\x53\xb9\xc6\x4c\x92\x31\x28\xbd\x22\x93\x04\xc9\x47\xa7\xf5\xce\x57\x64\x32\x65\x3c\xda\xda\x22\x1b\x87\x49\xe3\xec\x24\x57\x66\x12\x0e\xdf\x9d\x9c\x3c\xba\x40\xa6\x1c\x1b\xf5\x42\x19\xb4\xea\x4f\x2c\x12\xf8\x28\xb4\xc3\x9e\x94\xd4\x0a\x8d\xcf\x72\x65\x84\xdd\x1d\x18\x25\xf9\xc4\xde\x5a\xac\x30\xf3\x62\x95\xc0\x60\xf3\x9f\x74\xbe\x38\x9f\x5d\x0e\xfa\x42\x5b\xcc\xd9\xab\x23\x8d\x51\xc1\x88\x35\x26\x30\x38\xbf\x98\x7e\x4a\xb3\xab\x79\xfa\xf1\xfc\xcb\x24\x7c\x2c\xbf\x5e\xa5\xa3\x2d\xe6\xa3\xa8\x90\xf4\xad\x5a\x5c\x65\x8d\xd5\x09\x0c\x2f\xa6\x8b\x65\x3a\xcf\x66\xf3\xf4\x53\x76\x3d\xff\x3c\x7a\x7d\x27\x69\x5d\x93\x41\xe3\xef\x93\xd7\x77\x1b\xb4\x4e\x91\xb9\x1f\xf6\xd0\x84\x3c\x64\x05\x96\xa2\xd1\x3e\x73\x4d\x5e\xd0\x5a\x28\x93\xc0\x70\x99\x5e\x5c\xcd\x67\xd7\xcb\x74\x7e\x7e\x35\x36\xaa\x1e\x2b\x1a\x06\xb7\x91\x55\xf1\xff\x65\xa5\x5c\x97\xe5\x1c\xc1\xef\x6a\xfc\xa0\x4c\x69\xc5\x1b\xc8\x1b\xcf\x14\xa8\xc4\x06\xc1\x13\x68\xb5\x41\xd8\x2a\x5f\x81\xc5\x95\x22\x13\xc4\xa0\x24\x0b\x86\xb6\xd1\x5c\x8e\x52\x34\x0e\x81\x4a\xd0\xb8\x12\x72\x07\x16\x85\x23\xe3\x7a\xc8\x2d\x35\x8c\xdc\xa1\x46\xe9\xc9\x26\x30\x3c\x34\xda\xbf\x27\x9f\x39\x6f\x77\xdf\x96\x7f\x05\x97\xd4\x52\x17\xb6\xd5\x0e\x3c\x5f\x4c\x39\x10\x50\xa8\xb2\x44\x8b\xc6\x43\x21\xbc\x68\xaf\x18\x2e\xa7\x3c\xa8\x3e\xb0\xda\xd2\x1a\x7d\x85\x8d\xcb\x0c\x15\x78\xe0\xf1\x2e\xba\x1c\x26\x30\x0c\x5e\xef\xbb\x80\x2e\xd0\xc3\xf4\xcf\xc6\x22\xb8\x1a\xa5\x2a\x95\x0c\xae\x38\x36\x5c\x11\x58\x40\x77\x85\x9e\xbb\x70\x7a\x70\x41\x4f\x96\x59\x55\x5b\xda\xa8\x82\xcb\x60\x20\xd8\x70\x96\x6b\xca\x07\x2f\x54\xbe\x51\xa6\x48\x60\x40\xf9\xff\x50\xfa\x97\x2a\x3d\xb8\xc9\x84\x94\xd4\x18\x1f\x98\x3d\x9c\xa7\x9f\xce\x17\xcb\xf9\xd7\x6c\xb1\x9c\xcd\x99\xd7\xd3\xff\x5e\xcf\xd3\x6c\x7a\x7a\x3a\xbb\xbe\x5c\x5e\x4e\x2f\xd2\x7e\xba\x5e\xee\xe2\x06\x77\xdf\xf5\xf0\x4b\xfa\xf5\x6f\x38\xd8\xb7\x86\x04\x06\x9d\xdc\xdf\x08\x85\x45\xa1\xd7\x09\x0c\x24\x59\x1c\x6f\x95\x29\x68\xeb\xc6\x06\xfd\xe0\x99\x5a\x8a\xbf\x9c\x09\x5b\x80\xa4\x02\x03\x05\x63\x7d\x8c\x1f\xc9\x9c\x36\x96\x09\xa9\x99\xa6\x08\x52\x37\x5c\xc7\xe0\xbc\xf0\x08\xc2\x43\x81\xb5\xa6\xdd\x9a\x29\xeb\xd5\x1a\xa1\x20\x0c\x8d\x39\xd4\x62\x85\xc0\xdc\x74\xd1\x58\xc0\x8f\x16\x0b\x2e\x53\x3e\x0d\x7d\x21\x14\xab\x16\x39\x6a\x07\xa2\xae\xb5\xc2\x02\x84\x61\x2e\x8a\x62\xc7\xb2\x39\xc2\x1f\x0d\x5a\x85\x45\x34\x25\x56\x42\x19\xe7\x19\x03\xdb\xa9\x49\x19\xdf\xce\x10\x46\x11\x66\x49\x04\xd7\x8e\x83\x20\xa4\xc5\x2e\x27\xba\x71\x60\x1b\x33\x86\xa9\x76\xf4\x26\x9a\xe3\xe3\x50\xe9\xed\x60\x52\x52\x40\x9b\x76\x88\x1d\x0b\x06\xa5\xd0\xda\x41\x2e\xe4\xcd\x80\x01\xbd\x65\x9c\x96\x6a\xab\x84\x47\xbd\x83\x6d\x85\x16\x41\xb8\x43\x7b\x31\x5b\x7b\x8b\x9a\x56\x5c\x6f\x31\x44\x63\x58\xb6\x3a\x5b\xe1\x40\x68\x47\x50\x28\x27\x1b\xc7\xed\x13\x44\x4e\x5c\xf5\x65\xb
4\xd6\x8e\xcd\x03\x7f\x0c\xa0\xa0\x90\xb5\xd8\xe7\x5a\x0f\x0e\x3e\x7c\x80\xd0\xec\xda\xb0\xef\x9b\x1c\x1b\x88\xb6\x6a\xb4\x25\x4a\x4e\x68\x89\xa2\x9d\x69\x6c\xad\x4d\x97\x30\x07\xca\xf1\xfe\x95\x5a\x55\x68\x39\x86\x5d\x4c\x9d\xb2\xfb\x2c\x74\x7e\x1f\x73\x66\x8e\x1b\xe5\x94\x07\x2d\x38\x9e\xff\xa8\xc9\xb1\x9f\x1d\x37\x38\x81\x6b\x32\x0e\x3d\x90\x85\xd7\xe4\x2b\xb4\xff\x3c\x42\xf5\xd0\x76\x3b\x07\x09\xbc\xfd\x4e\x49\x1c\x4a\x1e\xef\x94\x2d\x71\x13\x18\xd4\x16\x1d\x9a\x27\x2d\xa7\xdf\x53\xf1\xb6\x26\xcb\x38\xc2\x04\x8d\xd3\x2d\x81\xe1\xd5\x7c\x76\x91\x2e\xcf\xd2\xeb\x45\x96\x7e\xb9\x9a\xcd\x79\x26\xc6\x99\x39\xfc\xe6\x24\x36\xce\x0b\xad\x13\x58\xda\x06\xbf\x81\x33\xf8\xab\x2d\x96\xea\xb6\x3f\xb3\xfb\xa0\x1f\x6a\x30\xe3\x89\x91\xc0\x70\x7a\xb9\x38\xff\xe9\x73\x9a\xfd\x9c\x5e\x7d\x9e\x7d\x6d\xc7\x7b\x44\xd5\xad\x31\x0e\xed\x46\x49\xcc\x72\x4b\x37\x7c\xbd\x47\xb0\xbe\x25\xc8\xb8\x12\x80\xbb\xbb\xdf\x07\x77\x77\xa0\x33\xe1\xf2\xfd\x54\x6f\x0f\x5d\x56\x28\xe9\x7f\x3d\x0a\xef\x37\x50\xe5\x71\xf0\x4c\xe0\x21\x59\xb5\x52\x66\x08\xa8\x1d\x46\x17\x21\x1e\x8d\xd5\x70\x7f\x3f\xf8\xfd\xfe\xbe\x17\x82\x0e\xa5\x14\x5e\x68\x5a\x75\x30\x8f\xee\x3a\x51\x7e\x14\xe5\x0f\xf6\x9d\x67\xd7\x16\xf4\xb2\x60\xe7\x2e\x81\x5f\x07\x95\xf7\xb5\x4b\x26\x93\xb3\xd9\xa2\x9d\x29\xc9\xbb\xf7\xff\xfa\x71\xf0\x5b\x88\xaf\xc7\x75\xcd\xb4\x3f\x12\xb7\xa3\x80\x3a\xbd\x3d\xb2\xa0\xd7\x5f\xc4\x5e\xc1\xe5\x6c\x99\x26\x61\x17\x52\x0e\x2c\x16\x8d\x29\x84\xf1\x61\x4d\xb0\xf8\x47\xa3\x62\x77\xad\x84\x29\x34\x42\xa4\x2c\xb8\x1b\xdc\x42\x8e\x7e\x8b\x68\xa2\xa9\xde\x9e\x3d\x8a\x29\xe7\xee\x39\x6b\x33\xd0\xb6\xe0\xd9\xe9\x55\x2f\x28\x78\x2b\xd6\xb5\xc6\xb0\xd4\x72\xe2\xf6\x65\x31\xd8\x2c\xce\x66\xf3\x25\x57\xc2\x93\x7d\x54\x92\xbc\xa9\x55\x97\x70\xb4\x0f\xe4\x3e\x9d\x9d\xfe\x72\x75\xbe\x3c\x46\xef\x27\x8a\xb9\x70\x18\x77\xd9\x4e\xf5\xa7\xe9\x22\xe5\x5c\x7c\x57\xf7\x01\x6a\xa7\xfa\x7c\x80\x7f\xa6\x76\x8a\x15\x58\x2a\x83\xdf\xed\x4e\xe0\xe8\x61\xca\x78\xe1\x6e\xa0\x54\x1a\xbb\x1e\xd9\xca\x8e\x77\x6b\x0d\xca\x38\x55\x84\xb1\xd8\x37\x09\x96\x34\x42\x69\x69\xfd\x4c\x46\xb6\x4a\xeb\x6e\xee\x35\x9e\x6a\xaa\x1b\x26\x0b\xaf\x85\x8d\xe3\xd7\xd6\x73\x16\xfb\x9b\x6b\x9b\x4d\x1e\xa2\x3b\x65\x56\x07\x63\xca\x34\xeb\x1c\x2d\xef\xc1\x07\x93\xa3\x4f\x37\xe4\x87\x92\x12\xb9\x6e\x87\x0a\x3f\xed\x84\x65\xa6\x78\xb4\x46\x68\xe6\xdb\x53\xd4\xec\x6f\x8b\x43\x8b\x8f\xf8\x16\xf6\xc5\x08\x7a\x0d\xed\x08\xf4\x04\x62\x43\xaa\x68\xf1\x28\x23\x79\xe7\xe0\xcd\xc4\x79\x86\x55\x0a\xe9\xa1\x54\xa6\xe8\x70\xef\x5d\x85\x87\x1b\x80\xa4\xf5\x9a\x4c\xf7\xc5\xdf\xa6\x54\xab\x96\x28\x09\x4c\xd0\xcb\x49\x68\x29\x93\xbd\xc4\x0b\x39\x1c\x84\x1f\x1e\x91\xf1\x0a\x4d\xae\x95\xdc\xf7\xf4\xf6\x61\xb4\x6f\x0b\x77\x77\x30\x4e\x6f\x43\x5c\x2e\x5a\xc5\x33\x72\xed\x86\x0a\xf7\xf7\xc9\xbf\x7f\xf8\xe1\xfd\x24\x2a\x0e\xfa\x06\x45\xad\xfe\xb2\xb1\x07\x23\x2f\x69\x52\xf0\x2a\xbe\xd3\x2f\x09\x34\x99\x15\x5a\x30\x88\x05\x6f\x5b\x8e\x43\xfd\x24\x89\xa3\xf7\xe3\x1f\xc7\xef\xde\x8d\xde\xc6\x37\xd6\xd0\x62\x5b\x18\x64\x02\xe1\xbb\xf6\xb2\x43\x1f\x81\x30\x7d\x1e\x42\xc5\x5f\x71\x3b\xef\xa0\x0c\x4f\x4e\x88\x62\x67\x75\x99\xa7\x2c\x64\xeb\xe8\x1b\xfd\xff\x01\x00\x00\xff\xff\xc3\xf7\x65\xa1\x60\x10\x00\x00") func masterTmpAnsibleAzureLocalMasterInventoryYmlBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml index a4be4587c6..c0d559967b 100644 --- a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml +++ 
b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml @@ -87,7 +87,7 @@ etcdStorageConfig: openShiftStoragePrefix: openshift.io openShiftStorageVersion: v1 imageConfig: - format: IMAGE_PREFIX/IMAGE_TYPE-${component}:${version} + format: MASTER_OREG_URL-${component}:${version} latest: false imagePolicyConfig: internalRegistryHostname: docker-registry.default.svc:5000 diff --git a/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml b/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml index 9fb2fd9342..365f530b9d 100644 --- a/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml +++ b/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml @@ -15,7 +15,7 @@ localmaster: openshift_web_console_image_name: "IMAGE_PREFIX/IMAGE_TYPE-web-console:vVERSION" - oreg_url: 'IMAGE_PREFIX/IMAGE_TYPE-${component}:${version}' + oreg_url: 'MASTER_OREG_URL-${component}:${version}' openshift_master_default_subdomain: 'TEMPROUTERIP.nip.io' # FIXME From 47dd3c9f357bc2ffaea2b9709a0c0d6d430c2d62 Mon Sep 17 00:00:00 2001 From: Patrick Lang Date: Fri, 22 Jun 2018 11:48:21 -0700 Subject: [PATCH 17/74] Enabling 2 Windows E2E tests (#3332) --- test/e2e/kubernetes/deployment/deployment.go | 24 ++---- test/e2e/kubernetes/kubernetes_test.go | 78 ++++++++++---------- test/e2e/kubernetes/util/util.go | 13 ++++ 3 files changed, 60 insertions(+), 55 deletions(-) diff --git a/test/e2e/kubernetes/deployment/deployment.go b/test/e2e/kubernetes/deployment/deployment.go index daf69b7047..2b7439972d 100644 --- a/test/e2e/kubernetes/deployment/deployment.go +++ b/test/e2e/kubernetes/deployment/deployment.go @@ -66,8 +66,7 @@ func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, er } else { cmd = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides) } - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -85,8 +84,7 @@ func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, er func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Deployment, error) { overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}` cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--replicas", strconv.Itoa(replicas), "--overrides", overrides, "--command", "--", "/bin/sh", "-c", command) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -103,8 +101,7 @@ func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Depl func CreateWindowsDeploy(image, name, namespace string, port int, hostport int) (*Deployment, error) { overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"windows"}}}}}` cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--port", strconv.Itoa(port), "--hostport", strconv.Itoa(hostport), "--overrides", overrides) - util.PrintCommand(cmd) - out, err := 
cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -120,8 +117,7 @@ func CreateWindowsDeploy(image, name, namespace string, port int, hostport int) // Get returns a deployment from a name and namespace func Get(name, namespace string) (*Deployment, error) { cmd := exec.Command("kubectl", "get", "deploy", "-o", "json", "-n", namespace, name) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while trying to fetch deployment %s in namespace %s:%s\n", name, namespace, string(out)) return nil, err @@ -138,8 +134,7 @@ func Get(name, namespace string) (*Deployment, error) { // Delete will delete a deployment in a given namespace func (d *Deployment) Delete() error { cmd := exec.Command("kubectl", "delete", "deploy", "-n", d.Metadata.Namespace, d.Metadata.Name) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while trying to delete deployment %s in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(out)) return err @@ -147,8 +142,7 @@ func (d *Deployment) Delete() error { // Delete any associated HPAs if d.Metadata.HasHPA { cmd := exec.Command("kubectl", "delete", "hpa", "-n", d.Metadata.Namespace, d.Metadata.Name) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Deployment %s has associated HPA but unable to delete in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(out)) return err @@ -160,8 +154,7 @@ func (d *Deployment) Delete() error { // Expose will create a load balancer and expose the deployment on a given port func (d *Deployment) Expose(svcType string, targetPort, exposedPort int) error { cmd := exec.Command("kubectl", "expose", "deployment", d.Metadata.Name, "--type", svcType, "-n", d.Metadata.Namespace, "--target-port", strconv.Itoa(targetPort), "--port", strconv.Itoa(exposedPort)) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while trying to expose (%s) target port (%v) for deployment %s in namespace %s on port %v:%s\n", svcType, targetPort, d.Metadata.Name, d.Metadata.Namespace, exposedPort, string(out)) return err @@ -173,8 +166,7 @@ func (d *Deployment) Expose(svcType string, targetPort, exposedPort int) error { func (d *Deployment) CreateDeploymentHPA(cpuPercent, min, max int) error { cmd := exec.Command("kubectl", "autoscale", "deployment", d.Metadata.Name, fmt.Sprintf("--cpu-percent=%d", cpuPercent), fmt.Sprintf("--min=%d", min), fmt.Sprintf("--max=%d", max)) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while configuring autoscale against deployment %s:%s\n", d.Metadata.Name, string(out)) return err diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 068179a925..1cdbe1b509 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -714,12 +714,11 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu }) Describe("with a windows agent pool", func() { - // TODO stabilize this test - /*It("should be able to deploy an iis webserver", func() { + It("should be able to 
deploy an iis webserver", func() { if eng.HasWindowsAgents() { r := rand.New(rand.NewSource(time.Now().UnixNano())) deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999)) - iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1709", deploymentName, "default", 80, -1) + iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1803", deploymentName, "default", 80, -1) Expect(err).NotTo(HaveOccurred()) running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout) @@ -738,11 +737,12 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu iisPods, err := iisDeploy.Pods() Expect(err).NotTo(HaveOccurred()) Expect(len(iisPods)).ToNot(BeZero()) - for _, iisPod := range iisPods { - pass, err := iisPod.CheckWindowsOutboundConnection(10*time.Second, cfg.Timeout) - Expect(err).NotTo(HaveOccurred()) - Expect(pass).To(BeTrue()) - } + // BUG - https://github.com/Azure/acs-engine/issues/3143 + // for _, iisPod := range iisPods { + // pass, err := iisPod.CheckWindowsOutboundConnection(10*time.Second, cfg.Timeout) + // Expect(err).NotTo(HaveOccurred()) + // Expect(pass).To(BeTrue()) + // } err = iisDeploy.Delete() Expect(err).NotTo(HaveOccurred()) @@ -751,46 +751,46 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } else { Skip("No windows agent was provisioned for this Cluster Definition") } - })*/ + }) - // TODO stabilize this test - /*It("should be able to reach hostport in an iis webserver", func() { - if eng.HasWindowsAgents() { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - hostport := 8123 - deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999)) - iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1709", deploymentName, "default", 80, hostport) - Expect(err).NotTo(HaveOccurred()) + // Windows Bug 16598869 + /* + It("should be able to reach hostport in an iis webserver", func() { + if eng.HasWindowsAgents() { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + hostport := 8123 + deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999)) + iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1803", deploymentName, "default", 80, hostport) + Expect(err).NotTo(HaveOccurred()) - running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout) - Expect(err).NotTo(HaveOccurred()) - Expect(running).To(Equal(true)) + running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) - iisPods, err := iisDeploy.Pods() - Expect(err).NotTo(HaveOccurred()) - Expect(len(iisPods)).ToNot(BeZero()) + iisPods, err := iisDeploy.Pods() + Expect(err).NotTo(HaveOccurred()) + Expect(len(iisPods)).ToNot(BeZero()) - kubeConfig, err := GetConfig() - Expect(err).NotTo(HaveOccurred()) - master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) - sshKeyPath := cfg.GetSSHKeyPath() + kubeConfig, err := GetConfig() + Expect(err).NotTo(HaveOccurred()) + master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) + sshKeyPath := cfg.GetSSHKeyPath() - for _, iisPod := range iisPods { - valid := iisPod.ValidateHostPort("(IIS Windows Server)", 10, 10*time.Second, master, sshKeyPath) - Expect(valid).To(BeTrue()) - } + for _, iisPod := range iisPods { + valid := iisPod.ValidateHostPort("(IIS Windows Server)", 10, 10*time.Second, master, sshKeyPath) 
+ Expect(valid).To(BeTrue()) + } - err = iisDeploy.Delete() - Expect(err).NotTo(HaveOccurred()) - } else { - Skip("No windows agent was provisioned for this Cluster Definition") - } - })*/ + err = iisDeploy.Delete() + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("No windows agent was provisioned for this Cluster Definition") + } + })*/ - // TODO stabilize this test /*It("should be able to attach azure file", func() { if eng.HasWindowsAgents() { - if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion ,"1.8") { + if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion, "1.8") { storageclassName := "azurefile" // should be the same as in storageclass-azurefile.yaml sc, err := storageclass.CreateStorageClassFromFile(filepath.Join(WorkloadDir, "storageclass-azurefile.yaml"), storageclassName) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/kubernetes/util/util.go b/test/e2e/kubernetes/util/util.go index 385422c299..2fe159685a 100644 --- a/test/e2e/kubernetes/util/util.go +++ b/test/e2e/kubernetes/util/util.go @@ -2,11 +2,24 @@ package util import ( "fmt" + "log" "os/exec" "strings" + "time" ) // PrintCommand prints a command string func PrintCommand(cmd *exec.Cmd) { fmt.Printf("\n$ %s\n", strings.Join(cmd.Args, " ")) } + +// RunAndLogCommand logs the command with a timestamp when it's run, and the duration at end +func RunAndLogCommand(cmd *exec.Cmd) ([]byte, error) { + cmdLine := fmt.Sprintf("$ %s", strings.Join(cmd.Args, " ")) + start := time.Now() + log.Printf("%s", cmdLine) + out, err := cmd.CombinedOutput() + end := time.Now() + log.Printf("#### %s completed in %s", cmdLine, end.Sub(start).String()) + return out, err +} From 8c51e8b8e9b912caad12f1b8149b4deeb2aaa54e Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 22 Jun 2018 12:16:12 -0700 Subject: [PATCH 18/74] undo cilium changes to base64 encoding (#3340) --- examples/networkpolicy/kubernetes-cilium.json | 2 +- parts/k8s/kubernetesmastercustomdata.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/networkpolicy/kubernetes-cilium.json b/examples/networkpolicy/kubernetes-cilium.json index f4cd723392..194568c772 100644 --- a/examples/networkpolicy/kubernetes-cilium.json +++ b/examples/networkpolicy/kubernetes-cilium.json @@ -3,7 +3,7 @@ "properties": { "orchestratorProfile": { "orchestratorType": "Kubernetes", - "orchestratorRelease": "1.9", + "orchestratorRelease": "1.10", "kubernetesConfig": { "networkPolicy": "cilium", "addons": [ diff --git a/parts/k8s/kubernetesmastercustomdata.yml b/parts/k8s/kubernetesmastercustomdata.yml index d3155a9ca9..cad59d67f2 100644 --- a/parts/k8s/kubernetesmastercustomdata.yml +++ b/parts/k8s/kubernetesmastercustomdata.yml @@ -264,9 +264,9 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER {{if eq .OrchestratorProfile.KubernetesConfig.NetworkPolicy "cilium"}} # If Cilium Policy enabled then update the etcd certs and address sed -i "s||{{WrapAsVerbatim "variables('masterEtcdClientURLs')[copyIndex(variables('masterOffset'))]"}}|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" - sed -i "s||\"$(base64 -w 0 /etc/kubernetes/certs/ca.crt)\"|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" - sed -i "s||\"$(base64 -w 0 /etc/kubernetes/certs/etcdclient.key)\"|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" - sed -i "s||\"$(base64 -w 0 /etc/kubernetes/certs/etcdclient.crt)\"|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" + sed -i 
"s||$(base64 -w 0 /etc/kubernetes/certs/ca.crt)|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" + sed -i "s||$(base64 -w 0 /etc/kubernetes/certs/etcdclient.key)|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" + sed -i "s||$(base64 -w 0 /etc/kubernetes/certs/etcdclient.crt)|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" {{end}} {{if UseCloudControllerManager }} sed -i "s||{{WrapAsVariable "kubernetesCcmImageSpec"}}|g" "/etc/kubernetes/manifests/cloud-controller-manager.yaml" From 10a781c9f370d07e827af4f2106496067eb48e09 Mon Sep 17 00:00:00 2001 From: Chris Ottinger Date: Tue, 26 Jun 2018 02:27:57 +1000 Subject: [PATCH 19/74] moving journald restart to end of config function (#3348) --- parts/k8s/kubernetescustomscript.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index 9e1534be62..726d29f349 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -385,11 +385,11 @@ function extractHyperkube(){ } function ensureJournal(){ - systemctlEnableAndStart systemd-journald echo "Storage=persistent" >> /etc/systemd/journald.conf echo "SystemMaxUse=1G" >> /etc/systemd/journald.conf echo "RuntimeMaxUse=1G" >> /etc/systemd/journald.conf echo "ForwardToSyslog=no" >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald } function ensurePodSecurityPolicy() { From b5f074cfd9a63e84d8867f420d3b66abdd4ddc6e Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 25 Jun 2018 10:07:13 -0700 Subject: [PATCH 20/74] update to latest version of CNI plugin (#3345) https://github.com/containernetworking/plugins/releases/tag/v0.7.1 --- pkg/acsengine/defaults.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index 3df0548054..9c814debc2 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -24,7 +24,7 @@ const ( AzureCniPluginVer = "v1.0.4" // CNIPluginVer specifies the version of CNI implementation // https://github.com/containernetworking/plugins - CNIPluginVer = "v0.7.0" + CNIPluginVer = "v0.7.1" ) var ( From fcbf0b4345bd25a4ad94498b1e66f8b721545598 Mon Sep 17 00:00:00 2001 From: Jess Frazelle Date: Mon, 25 Jun 2018 13:55:02 -0400 Subject: [PATCH 21/74] Remove docker (#2973) --- .../kubernetes-config/containerd.json | 40 ------------------- .../kubernetes/kubernetes-config/flannel.json | 39 ------------------ parts/k8s/kubernetesagentcustomdata.yml | 9 ++++- parts/k8s/kubernetescustomscript.sh | 10 ++++- parts/k8s/kubernetesmastercustomdata.yml | 5 +++ pkg/api/types.go | 6 +++ pkg/api/vlabs/types.go | 6 +++ 7 files changed, 32 insertions(+), 83 deletions(-) delete mode 100644 examples/e2e-tests/kubernetes/kubernetes-config/containerd.json delete mode 100644 examples/e2e-tests/kubernetes/kubernetes-config/flannel.json diff --git a/examples/e2e-tests/kubernetes/kubernetes-config/containerd.json b/examples/e2e-tests/kubernetes/kubernetes-config/containerd.json deleted file mode 100644 index facad5df83..0000000000 --- a/examples/e2e-tests/kubernetes/kubernetes-config/containerd.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "apiVersion": "vlabs", - "properties": { - "orchestratorProfile": { - "orchestratorType": "Kubernetes", - "orchestratorRelease": "1.10", - "kubernetesConfig": { - "networkPlugin": "flannel", - "containerRuntime": "containerd" - } - }, - "masterProfile": { - "count": 1, - "dnsPrefix": "", - "vmSize": "Standard_D2_v2" - }, - "agentPoolProfiles": [ 
- { - "name": "agentpool1", - "count": 3, - "vmSize": "Standard_D2_v2", - "availabilityProfile": "AvailabilitySet" - } - ], - "linuxProfile": { - "adminUsername": "azureuser", - "ssh": { - "publicKeys": [ - { - "keyData": "" - } - ] - } - }, - "servicePrincipalProfile": { - "clientId": "", - "secret": "" - } - } -} diff --git a/examples/e2e-tests/kubernetes/kubernetes-config/flannel.json b/examples/e2e-tests/kubernetes/kubernetes-config/flannel.json deleted file mode 100644 index d015d34c53..0000000000 --- a/examples/e2e-tests/kubernetes/kubernetes-config/flannel.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "apiVersion": "vlabs", - "properties": { - "orchestratorProfile": { - "orchestratorType": "Kubernetes", - "kubernetesConfig": { - "networkPlugin":"flannel", - } - }, - "masterProfile": { - "count": 1, - "dnsPrefix": "", - "vmSize": "Standard_D2_v2" - }, - "agentPoolProfiles": [ - { - "name": "linuxpool1", - "count": 3, - "vmSize": "Standard_D2_v2", - "availabilityProfile": "AvailabilitySet" - } - ], - "linuxProfile": { - "adminUsername": "azureuser", - "ssh": { - "publicKeys": [ - { - "keyData": "" - } - ] - } - }, - "servicePrincipalProfile": { - "clientId": "", - "secret": "" - }, - "certificateProfile": {} - } -} diff --git a/parts/k8s/kubernetesagentcustomdata.yml b/parts/k8s/kubernetesagentcustomdata.yml index d5b6eece1c..7466b748fb 100644 --- a/parts/k8s/kubernetesagentcustomdata.yml +++ b/parts/k8s/kubernetesagentcustomdata.yml @@ -8,14 +8,15 @@ write_files: content: !!binary | {{WrapAsVariable "provisionSource"}} -{{if not .IsCoreOS}} +{{if .KubernetesConfig.RequiresDocker}} + {{if not .IsCoreOS}} - path: "/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf" permissions: "0644" owner: "root" content: | [Service] MountFlags=shared -{{end}} + {{end}} - path: "/etc/systemd/system/docker.service.d/exec_start.conf" permissions: "0644" owner: "root" @@ -43,6 +44,7 @@ write_files: } }{{end}} } +{{end}} - path: "/etc/kubernetes/certs/ca.crt" permissions: "0644" @@ -139,7 +141,10 @@ AGENT_ARTIFACTS_CONFIG_PLACEHOLDER content: | #!/bin/bash /usr/bin/mkdir -p /etc/kubernetes/manifests + + {{if .KubernetesConfig.RequiresDocker}} usermod -aG docker {{WrapAsVariable "username"}} + {{end}} systemctl enable rpcbind systemctl enable rpc-statd diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index 726d29f349..a44cd313cf 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -337,6 +337,7 @@ function installContainerd() { retrycmd_get_tarball 60 5 "$CONTAINERD_TGZ_TMP" "$CONTAINERD_DOWNLOAD_URL" tar -xzf "$CONTAINERD_TGZ_TMP" -C / rm -f "$CONTAINERD_TGZ_TMP" + sed -i '/\[Service\]/a ExecStartPost=\/sbin\/iptables -P FORWARD ACCEPT' /etc/systemd/system/containerd.service echo "Successfully installed cri-containerd..." if [[ "$CONTAINER_RUNTIME" == "clear-containers" ]] || [[ "$CONTAINER_RUNTIME" == "containerd" ]]; then @@ -511,9 +512,14 @@ if [ -f $CUSTOM_SEARCH_DOMAIN_SCRIPT ]; then fi installDeps -installDocker + +if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installDocker + ensureDocker +fi + configureK8s -ensureDocker + configNetworkPlugin if [[ ! 
-z "${MASTER_NODE}" ]]; then diff --git a/parts/k8s/kubernetesmastercustomdata.yml b/parts/k8s/kubernetesmastercustomdata.yml index cad59d67f2..0ad36b94b1 100644 --- a/parts/k8s/kubernetesmastercustomdata.yml +++ b/parts/k8s/kubernetesmastercustomdata.yml @@ -14,6 +14,7 @@ write_files: content: !!binary | {{WrapAsVariable "provisionSource"}} +{{if .OrchestratorProfile.KubernetesConfig.RequiresDocker}} {{if not .MasterProfile.IsCoreOS}} - path: "/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf" permissions: "0644" @@ -46,6 +47,7 @@ write_files: "max-file": "5" } } +{{end}} - path: "/etc/kubernetes/certs/ca.crt" permissions: "0644" @@ -376,7 +378,10 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER systemctl restart etcd-member retrycmd_if_failure 5 5 10 curl --retry 5 --retry-delay 10 --retry-max-time 10 --max-time 60 http://127.0.0.1:2379/v2/machines mkdir -p /etc/kubernetes/manifests + + {{if .OrchestratorProfile.KubernetesConfig.RequiresDocker}} usermod -aG docker {{WrapAsVariable "username"}} + {{end}} {{if EnableAggregatedAPIs}} sudo bash /etc/kubernetes/generate-proxy-certs.sh diff --git a/pkg/api/types.go b/pkg/api/types.go index 5fc5793b40..d486f04022 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -978,3 +978,9 @@ func (k *KubernetesConfig) PrivateJumpboxProvision() bool { } return false } + +// RequiresDocker returns if the kubernetes settings require docker to be installed. +func (k *KubernetesConfig) RequiresDocker() bool { + runtime := strings.ToLower(k.ContainerRuntime) + return runtime == "docker" || runtime == "" +} diff --git a/pkg/api/vlabs/types.go b/pkg/api/vlabs/types.go index 0ca4e39127..507e36d225 100644 --- a/pkg/api/vlabs/types.go +++ b/pkg/api/vlabs/types.go @@ -633,3 +633,9 @@ func (l *LinuxProfile) HasCustomNodesDNS() bool { func (o *OrchestratorProfile) IsSwarmMode() bool { return o.OrchestratorType == SwarmMode } + +// RequiresDocker returns if the kubernetes settings require docker to be installed. +func (k *KubernetesConfig) RequiresDocker() bool { + runtime := strings.ToLower(k.ContainerRuntime) + return runtime == "docker" || runtime == "" +} From c70ce4a7bb442e34dccd74cce0c464645396f8b0 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Mon, 25 Jun 2018 11:03:28 -0700 Subject: [PATCH 22/74] Fix OMS addon + add tests and documentation for addons (#3342) --- docs/clusterdefinition.md | 8 ++- .../kubernetes-config/addons-disabled.json | 8 +++ .../kubernetes-config/addons-enabled.json | 64 +++++++++++++++++++ ...netesmasteraddons-omsagent-daemonset.yaml} | 0 parts/k8s/kubernetesmastercustomdata.yml | 18 +++--- pkg/acsengine/addons.go | 2 +- 6 files changed, 87 insertions(+), 13 deletions(-) create mode 100644 examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json rename parts/k8s/addons/{omsagent-daemonset.yaml => kubernetesmasteraddons-omsagent-daemonset.yaml} (100%) diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 1dfd4181dc..0a3bc5ad33 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -68,9 +68,11 @@ Here are the valid values for the orchestrator types: | Name of addon | Enabled by default? 
| How many containers | Description | | --------------------------------------------------------------------- | ------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | tiller | true | 1 | Delivers the Helm server-side component: tiller. See https://github.com/kubernetes/helm for more info | -| kubernetes-dashboard | true | 1 | Delivers the kubernetes dashboard component. See https://github.com/kubernetes/dashboard for more info | -| rescheduler | false | 1 | Delivers the kubernetes rescheduler component | -| [cluster-autoscaler](../examples/addons/cluster-autoscaler/README.md) | false | 1 | Delivers the kubernetes cluster autoscaler component. See https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure for more info | +| kubernetes-dashboard | true | 1 | Delivers the Kubernetes dashboard component. See https://github.com/kubernetes/dashboard for more info | +| rescheduler | false | 1 | Delivers the Kubernetes rescheduler component | +| [cluster-autoscaler](../examples/addons/cluster-autoscaler/README.md) | false | 1 | Delivers the Kubernetes cluster autoscaler component. See https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure for more info | +| [nvidia-device-plugin](../examples/addons/nvidia-device-plugin/README.md) | true if using a Kubernetes cluster (v1.10+) with an N-series agent pool | 1 | Delivers the Kubernetes NVIDIA device plugin component. See https://github.com/NVIDIA/k8s-device-plugin for more info | +| container-monitoring | false | 1 | Delivers the Kubernetes container monitoring component | To give a bit more info on the `addons` property: We've tried to expose the basic bits of data that allow useful configuration of these cluster features. 
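For illustration only, a minimal `addons` stanza as it could appear under `kubernetesConfig` in a cluster definition apimodel — the addon names and `enabled` flags mirror the addons-enabled.json example introduced by this change, while the surrounding structure is assumed rather than taken from the patch:

    "kubernetesConfig": {
      "addons": [
        { "name": "container-monitoring", "enabled": true },
        { "name": "rescheduler", "enabled": false }
      ]
    }
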
Here are some example usage patterns that will unpack what `addons` provide: diff --git a/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json b/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json index 9884671690..ebd3964690 100644 --- a/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json +++ b/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json @@ -20,6 +20,14 @@ { "name": "rescheduler", "enabled": false + }, + { + "name": "nvidia-device-plugin", + "enabled": false + }, + { + "name": "container-monitoring", + "enabled": false } ] } diff --git a/examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json b/examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json new file mode 100644 index 0000000000..b07d50d23b --- /dev/null +++ b/examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json @@ -0,0 +1,64 @@ +{ + "apiVersion": "vlabs", + "properties": { + "orchestratorProfile": { + "orchestratorType": "Kubernetes", + "orchestratorRelease": "1.10", + "kubernetesConfig": { + "addons": [ + { + "name": "tiller", + "enabled": true + }, + { + "name": "aci-connector", + "enabled": true + }, + { + "name": "kubernetes-dashboard", + "enabled": true + }, + { + "name": "rescheduler", + "enabled": true + }, + { + "name": "nvidia-device-plugin", + "enabled": true + }, + { + "name": "container-monitoring", + "enabled": true + } + ] + } + }, + "masterProfile": { + "count": 1, + "dnsPrefix": "", + "vmSize": "Standard_D2_v2" + }, + "agentPoolProfiles": [ + { + "name": "linuxpool1", + "count": 3, + "vmSize": "Standard_D2_v2" + } + ], + "linuxProfile": { + "adminUsername": "azureuser", + "ssh": { + "publicKeys": [ + { + "keyData": "" + } + ] + } + }, + "servicePrincipalProfile": { + "clientId": "", + "secret": "" + }, + "certificateProfile": {} + } +} \ No newline at end of file diff --git a/parts/k8s/addons/omsagent-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-omsagent-daemonset.yaml similarity index 100% rename from parts/k8s/addons/omsagent-daemonset.yaml rename to parts/k8s/addons/kubernetesmasteraddons-omsagent-daemonset.yaml diff --git a/parts/k8s/kubernetesmastercustomdata.yml b/parts/k8s/kubernetesmastercustomdata.yml index 0ad36b94b1..9a23f98f95 100644 --- a/parts/k8s/kubernetesmastercustomdata.yml +++ b/parts/k8s/kubernetesmastercustomdata.yml @@ -290,15 +290,15 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER sed -i "s||{{WrapAsVariable "searchDomainRealmPassword"}}|g" "/opt/azure/containers/setup-custom-search-domains.sh" {{end}} {{if .OrchestratorProfile.IsContainerMonitoringEnabled}} - sed -i "s||{{WrapAsVariable "omsAgentVersion"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "dockerProviderVersion"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesContainerMonitoringSpec"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "workspaceGuid"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "workspaceKey"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesOMSAgentCPURequests"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryRequests"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i 
"s||{{WrapAsVariable "kubernetesOMSAgentCPULimit"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryLimit"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentVersion"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentDockerProviderVersion"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentImage"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentWorkspaceGuid"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentWorkspaceKey"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentCPURequests"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryRequests"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentCPULimit"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryLimit"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" {{end}} - path: "/opt/azure/containers/provision.sh" diff --git a/pkg/acsengine/addons.go b/pkg/acsengine/addons.go index 83ffeced38..7c707843e1 100644 --- a/pkg/acsengine/addons.go +++ b/pkg/acsengine/addons.go @@ -103,7 +103,7 @@ func kubernetesAddonSettingsInit(profile *api.Properties) []kubernetesFeatureSet profile.OrchestratorProfile.IsMetricsServerEnabled(), }, { - "omsagent-daemonset.yaml", + "kubernetesmasteraddons-omsagent-daemonset.yaml", "omsagent-daemonset.yaml", profile.OrchestratorProfile.IsContainerMonitoringEnabled(), }, From ffc7707b169673b494cc024569fcff53b963d053 Mon Sep 17 00:00:00 2001 From: Diwakar Date: Tue, 26 Jun 2018 01:47:22 +0630 Subject: [PATCH 23/74] Doc update: fix example kubernetes-vmas.json link (#3353) --- docs/kubernetes/features.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/kubernetes/features.md b/docs/kubernetes/features.md index fdc5a619ad..554fd28297 100644 --- a/docs/kubernetes/features.md +++ b/docs/kubernetes/features.md @@ -2,7 +2,7 @@ |Feature|Status|API Version|Example|Description| |---|---|---|---|---| -|Managed Disks|Beta|`vlabs`|[kubernetes-vmas.json](../../examples/disks-managed/kubernetes-vmss.json)|[Description](#feat-managed-disks)| +|Managed Disks|Beta|`vlabs`|[kubernetes-vmas.json](../../examples/disks-managed/kubernetes-vmas.json)|[Description](#feat-managed-disks)| |Calico Network Policy|Alpha|`vlabs`|[kubernetes-calico.json](../../examples/networkpolicy/kubernetes-calico.json)|[Description](#feat-calico)| |Cilium Network Policy|Alpha|`vlabs`|[kubernetes-cilium.json](../../examples/networkpolicy/kubernetes-cilium.json)|[Description](#feat-cilium)| |Custom VNET|Beta|`vlabs`|[kubernetesvnet-azure-cni.json](../../examples/vnet/kubernetesvnet-azure-cni.json)|[Description](#feat-custom-vnet)| @@ -370,4 +370,4 @@ To get `objectId` of the service principal: ```console az ad sp list --spn -``` \ No newline at end of file +``` From 690ac5c6f002468009fa3274f821312f36dc808b Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Mon, 25 Jun 2018 14:14:37 -0700 Subject: [PATCH 24/74] Added more unit tests (#3351) --- pkg/api/agentPoolOnlyApi/vlabs/types_test.go | 28 +++- pkg/api/common/versions_test.go | 12 ++ pkg/api/v20160330/types_test.go | 68 ++++++++++ 
pkg/api/v20160930/types.go | 2 +- pkg/api/v20160930/types_test.go | 45 +++++- pkg/api/v20170131/types_test.go | 2 +- pkg/api/v20170701/types_test.go | 66 ++++++++- pkg/api/vlabs/types_test.go | 136 +++++++++++++++++++ pkg/api/vlabs/validate.go | 4 +- pkg/api/vlabs/validate_test.go | 123 +++++++++++++++++ pkg/armhelpers/deploymentError_test.go | 66 +++++++++ pkg/armhelpers/mockclients.go | 59 +++++++- pkg/armhelpers/utils/util.go | 4 - pkg/armhelpers/utils/util_test.go | 34 ++++- pkg/helpers/helpers_test.go | 2 +- 15 files changed, 632 insertions(+), 19 deletions(-) diff --git a/pkg/api/agentPoolOnlyApi/vlabs/types_test.go b/pkg/api/agentPoolOnlyApi/vlabs/types_test.go index 17b5cf1fbc..0cedb47129 100644 --- a/pkg/api/agentPoolOnlyApi/vlabs/types_test.go +++ b/pkg/api/agentPoolOnlyApi/vlabs/types_test.go @@ -7,7 +7,7 @@ import ( func TestAgentPoolProfile(t *testing.T) { // With osType not specified - AgentPoolProfileText := "{\"count\" : 0}" + AgentPoolProfileText := `{ "name": "linuxpool1", "count": 0, "vmSize": "Standard_D2_v2", "availabilityProfile": "AvailabilitySet" }` ap := &AgentPoolProfile{} if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) @@ -17,11 +17,35 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux { + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } if !ap.IsStorageAccount() { t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != StorageAccount after unmarshal") } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + if !ap.IsCustomVNET() { + t.Fatalf("unexpectedly detected empty AgentPoolProfile.VNetSubnetID after unmarshal") + } } diff --git a/pkg/api/common/versions_test.go b/pkg/api/common/versions_test.go index dc9e4f9e0f..15c08dfe4d 100644 --- a/pkg/api/common/versions_test.go +++ b/pkg/api/common/versions_test.go @@ -340,6 +340,11 @@ func Test_GetValidPatchVersion(t *testing.T) { t.Errorf("It is not the default Kubernetes version") } + v = GetValidPatchVersion(Mesos, "1.6.0", false) + if v != "" { + t.Errorf("Expected empty version for unsupported orchType") + } + for version, enabled := range AllKubernetesWindowsSupportedVersions { if enabled { v = GetValidPatchVersion(Kubernetes, version, true) @@ -411,6 +416,13 @@ func TestGetMaxVersion(t *testing.T) { t.Errorf("GetMaxVersion returned the wrong max version, expected %s, got %s", expected, max) } + expected = "1.1.2" + versions = []string{"1.1.1", "1.0.0-alpha.1", expected} + max = GetMaxVersion(versions, true) + if max != expected { + t.Errorf("GetMaxVersion returned the wrong max version, expected %s, got %s", expected, max) + 
} + expected = "" versions = []string{} max = GetMaxVersion(versions, false) diff --git a/pkg/api/v20160330/types_test.go b/pkg/api/v20160330/types_test.go index 7dc5e65abd..87061feab1 100644 --- a/pkg/api/v20160330/types_test.go +++ b/pkg/api/v20160330/types_test.go @@ -1,6 +1,7 @@ package v20160330 import ( + "encoding/json" "testing" ) @@ -18,3 +19,70 @@ func TestIsDCOS(t *testing.T) { t.Fatalf("unexpectedly detected DCOS orchestrator profile from OrchestratorType=%s", kubernetesProfile.OrchestratorType) } } + +func TestAgentPoolProfile(t *testing.T) { + // With osType not specified + AgentPoolProfileText := `{ "name": "linuxpool1", "count": 0, "vmSize": "Standard_D2_v2", "availabilityProfile": "AvailabilitySet" }` + ap := &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsLinux() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") + } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } +} + +func TestMasterProfile(t *testing.T) { + MasterProfileText := `{ "count": 0, "dnsPrefix": "", "vmSize": "Standard_D2_v2" }` + mp := &MasterProfile{} + if e := json.Unmarshal([]byte(MasterProfileText), mp); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for MasterProfile, %+v", e) + } +} + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e == nil { + t.Fatalf("expected unmarshal failure for OrchestratorProfile") + } +} diff --git a/pkg/api/v20160930/types.go b/pkg/api/v20160930/types.go index cdaf13dc77..87b665dafa 100644 --- a/pkg/api/v20160930/types.go +++ b/pkg/api/v20160930/types.go @@ -176,7 +176,7 @@ func (a *AgentPoolProfile) UnmarshalJSON(b []byte) error { return nil } -// JumpboxProfile dscribes properties of the jumpbox setup +// JumpboxProfile describes properties of the jumpbox setup // in 
the ACS container cluster. type JumpboxProfile struct { OSType OSType `json:"osType,omitempty"` diff --git a/pkg/api/v20160930/types_test.go b/pkg/api/v20160930/types_test.go index 6c6b44d976..3ac851506a 100644 --- a/pkg/api/v20160930/types_test.go +++ b/pkg/api/v20160930/types_test.go @@ -44,7 +44,50 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux { + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } +} + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + + } } diff --git a/pkg/api/v20170131/types_test.go b/pkg/api/v20170131/types_test.go index 98df028cb6..8afb3509dc 100644 --- a/pkg/api/v20170131/types_test.go +++ b/pkg/api/v20170131/types_test.go @@ -44,7 +44,7 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux { + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } } diff --git a/pkg/api/v20170701/types_test.go b/pkg/api/v20170701/types_test.go index 525a84fe9a..df6b4d79db 100644 --- a/pkg/api/v20170701/types_test.go +++ b/pkg/api/v20170701/types_test.go @@ -23,7 +23,7 @@ func TestMasterProfile(t *testing.T) { func TestAgentPoolProfile(t *testing.T) { // With osType not specified - AgentPoolProfileText := "{\"count\" : 0}" + AgentPoolProfileText := `{"count" : 0, "storageProfile" : "StorageAccount"}` ap := &AgentPoolProfile{} if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) @@ -33,7 +33,69 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux 
{ + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } + + if !ap.IsStorageAccount() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } +} + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e == nil { + t.Fatalf("expected unmarshal failure for OrchestratorProfile when passing an invalid orchestratorType") + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "DockerCE" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + if !op.IsSwarmMode() { + t.Fatalf("unexpectedly detected OrchestratorProfile.Type != DockerCE after unmarshal") + + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + + } } diff --git a/pkg/api/vlabs/types_test.go b/pkg/api/vlabs/types_test.go index 2147307fda..cea24440e4 100644 --- a/pkg/api/vlabs/types_test.go +++ b/pkg/api/vlabs/types_test.go @@ -1,6 +1,7 @@ package vlabs import ( + "encoding/json" "testing" ) @@ -41,3 +42,138 @@ func TestKubernetesAddon(t *testing.T) { t.Fatalf("KubernetesAddon.IsEnabled(true) should always return false when Enabled property is set to false") } } + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e == nil { + t.Fatalf("expected unmarshal failure for OrchestratorProfile when passing an invalid orchestratorType") + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + 
OrchestratorProfileText = `{ "orchestratorType": "SwarmMode" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + if !op.IsSwarmMode() { + t.Fatalf("unexpectedly detected OrchestratorProfile.Type != DockerCE after unmarshal") + + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + + } +} + +func TestAgentPoolProfile(t *testing.T) { + // With osType not specified + AgentPoolProfileText := `{"count" : 0, "storageProfile" : "StorageAccount", "vnetSubnetID" : "1234"}` + ap := &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 0 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsCustomVNET() { + t.Fatalf("unexpectedly detected nil AgentPoolProfile.VNetSubNetID after unmarshal") + } + + if !ap.IsStorageAccount() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType Windows + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType Linux and RHEL distro + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Linux", "distro" : "rhel", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsLinux() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") + } + + if !ap.IsRHEL() { + t.Fatalf("unexpectedly detected AgentPoolProfile.Distro != RHEL after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType Linux and coreos distro + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Linux", "distro" : "coreos", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": 
"VirtualMachineScaleSets", "storageProfile" : "ManagedDisks", "diskSizesGB" : [750, 250, 600, 1000] }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsLinux() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") + } + + if !ap.IsCoreOS() { + t.Fatalf("unexpectedly detected AgentPoolProfile.Distro != CoreOS after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + if !ap.HasDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.DiskSizesGB < 0 after unmarshal") + } + + if !ap.IsVirtualMachineScaleSets() { + t.Fatalf("unexpectedly detected AgentPoolProfile.AvailabilitySets != VirtualMachineScaleSets after unmarshal") + } +} diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 0515cf3a74..44a94220c9 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -503,8 +503,8 @@ func (a *Properties) validateVNET() error { func (a *Properties) validateServicePrincipalProfile() error { if a.OrchestratorProfile.OrchestratorType == Kubernetes { - useManagedIdentity := (a.OrchestratorProfile.KubernetesConfig != nil && - a.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) + useManagedIdentity := a.OrchestratorProfile.KubernetesConfig != nil && + a.OrchestratorProfile.KubernetesConfig.UseManagedIdentity if !useManagedIdentity { if a.ServicePrincipalProfile == nil { diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index 569de26395..b26cc5f973 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -27,6 +27,9 @@ const ( ValidKubernetesCloudProviderRateLimitBucket = 10 ) +var falseVal = false +var trueVal = true + func Test_OrchestratorProfile_Validate(t *testing.T) { tests := map[string]struct { properties *Properties @@ -44,6 +47,104 @@ func Test_OrchestratorProfile_Validate(t *testing.T) { }, expectedError: "KubernetesConfig can be specified only when OrchestratorType is Kubernetes or OpenShift", }, + "should error when KubernetesConfig has invalid etcd version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + KubernetesConfig: &KubernetesConfig{ + EtcdVersion: "1.0.0", + }, + }, + }, + expectedError: "Invalid etcd version \"1.0.0\", please use one of the following versions: [2.2.5 2.3.0 2.3.1 2.3.2 2.3.3 2.3.4 2.3.5 2.3.6 2.3.7 2.3.8 3.0.0 3.0.1 3.0.2 3.0.3 3.0.4 3.0.5 3.0.6 3.0.7 3.0.8 3.0.9 3.0.10 3.0.11 3.0.12 3.0.13 3.0.14 3.0.15 3.0.16 3.0.17 3.1.0 3.1.1 3.1.2 3.1.2 3.1.3 3.1.4 3.1.5 3.1.6 3.1.7 3.1.8 3.1.9 3.1.10 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.2.5 3.2.6 3.2.7 3.2.8 3.2.9 3.2.11 3.2.12 3.2.13 3.2.14 3.2.15 3.2.16 3.3.0 3.3.1]", + }, + "should error when KubernetesConfig has enableAggregatedAPIs enabled with an invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.6.6", + KubernetesConfig: &KubernetesConfig{ + EnableAggregatedAPIs: true, + }, + }, + }, + expectedError: "enableAggregatedAPIs is only available in Kubernetes version 1.7.0 or greater; unable to validate for Kubernetes version 1.6.6", + }, + "should error when KubernetesConfig has 
enableAggregatedAPIs enabled and enableRBAC disabled": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnableAggregatedAPIs: true, + EnableRbac: &falseVal, + }, + }, + }, + expectedError: "enableAggregatedAPIs requires the enableRbac feature as a prerequisite", + }, + "should error when KubernetesConfig has enableDataEncryptionAtRest enabled with invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.6.6", + KubernetesConfig: &KubernetesConfig{ + EnableDataEncryptionAtRest: &trueVal, + }, + }, + }, + expectedError: "enableDataEncryptionAtRest is only available in Kubernetes version 1.7.0 or greater; unable to validate for Kubernetes version 1.6.6", + }, + "should error when KubernetesConfig has enableDataEncryptionAtRest enabled with invalid encryption key": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnableDataEncryptionAtRest: &trueVal, + EtcdEncryptionKey: "fakeEncryptionKey", + }, + }, + }, + expectedError: "etcdEncryptionKey must be base64 encoded. Please provide a valid base64 encoded value or leave the etcdEncryptionKey empty to auto-generate the value", + }, + "should error when KubernetesConfig has enableEncryptionWithExternalKms enabled with invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.6.6", + KubernetesConfig: &KubernetesConfig{ + EnableEncryptionWithExternalKms: &trueVal, + }, + }, + }, + expectedError: "enableEncryptionWithExternalKms is only available in Kubernetes version 1.10.0 or greater; unable to validate for Kubernetes version 1.6.6", + }, + "should error when KubernetesConfig has enablePodSecurity enabled with invalid settings": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnablePodSecurityPolicy: &trueVal, + }, + }, + }, + expectedError: "enablePodSecurityPolicy requires the enableRbac feature as a prerequisite", + }, + "should error when KubernetesConfig has enablePodSecurity enabled with invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnableRbac: &trueVal, + EnablePodSecurityPolicy: &trueVal, + }, + }, + }, + expectedError: "enablePodSecurityPolicy is only supported in acs-engine for Kubernetes version 1.8.0 or greater; unable to validate for Kubernetes version 1.7.0", + }, "should not error with empty object": { properties: &Properties{ OrchestratorProfile: &OrchestratorProfile{ @@ -52,6 +153,28 @@ func Test_OrchestratorProfile_Validate(t *testing.T) { }, }, }, + "should error when DcosConfig orchestrator has invalid configuration": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "DCOS", + OrchestratorVersion: "1.12.0", + }, + }, + expectedError: "the following OrchestratorProfile configuration is not supported: OrchestratorType: DCOS, OrchestratorRelease: , OrchestratorVersion: 1.12.0. 
Please check supported Release or Version for this build of acs-engine", + }, + "should error when DcosConfig orchestrator configuration has invalid static IP": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "DCOS", + DcosConfig: &DcosConfig{ + BootstrapProfile: &BootstrapProfile{ + StaticIP: "0.0.0.0.0.0", + }, + }, + }, + }, + expectedError: "DcosConfig.BootstrapProfile.StaticIP '0.0.0.0.0.0' is an invalid IP address", + }, "should error when DcosConfig populated for non-Kubernetes OrchestratorType 1": { properties: &Properties{ OrchestratorProfile: &OrchestratorProfile{ diff --git a/pkg/armhelpers/deploymentError_test.go b/pkg/armhelpers/deploymentError_test.go index 0d288d5e18..16e1e1148a 100644 --- a/pkg/armhelpers/deploymentError_test.go +++ b/pkg/armhelpers/deploymentError_test.go @@ -6,6 +6,9 @@ import ( . "github.com/Azure/acs-engine/pkg/test" . "github.com/onsi/gomega" + "fmt" + + "github.com/Azure/azure-sdk-for-go/arm/resources/resources" . "github.com/onsi/ginkgo" log "github.com/sirupsen/logrus" ) @@ -63,4 +66,67 @@ var _ = Describe("Template deployment tests", func() { Expect(string(deplErr.Response)).To(ContainSubstring("\"code\":\"Conflict\"")) Expect(len(deplErr.OperationsLists)).To(Equal(0)) }) + + It("Should return deployment error with Operations Lists", func() { + mockClient := &MockACSEngineClient{} + mockClient.FailDeployTemplateWithProperties = true + logger := log.NewEntry(log.New()) + + err := DeployTemplateSync(mockClient, logger, "rg1", "agentvm", map[string]interface{}{}, map[string]interface{}{}) + Expect(err).NotTo(BeNil()) + deplErr, ok := err.(*DeploymentError) + Expect(ok).To(BeTrue()) + Expect(deplErr.TopError).NotTo(BeNil()) + Expect(deplErr.ProvisioningState).To(Equal("Failed")) + Expect(deplErr.StatusCode).To(Equal(200)) + Expect(string(deplErr.Response)).To(ContainSubstring("\"code\":\"Conflict\"")) + Expect(len(deplErr.OperationsLists)).To(Equal(2)) + }) + + It("Should return nil on success", func() { + mockClient := &MockACSEngineClient{} + logger := log.NewEntry(log.New()) + err := DeployTemplateSync(mockClient, logger, "rg1", "agentvm", map[string]interface{}{}, map[string]interface{}{}) + Expect(err).To(BeNil()) + }) }) + +func TestDeploymentError_Error(t *testing.T) { + operationsLists := make([]resources.DeploymentOperationsListResult, 0) + operationsList := resources.DeploymentOperationsListResult{} + operations := make([]resources.DeploymentOperation, 0) + id := "1234" + oID := "342" + provisioningState := "Failed" + status := map[string]interface{}{ + "message": "sample status message", + } + properties := resources.DeploymentOperationProperties{ + ProvisioningState: &provisioningState, + StatusMessage: &status, + } + operation1 := resources.DeploymentOperation{ + ID: &id, + OperationID: &oID, + Properties: &properties, + } + operations = append(operations, operation1) + operationsList.Value = &operations + operationsLists = append(operationsLists, operationsList) + deploymentErr := &DeploymentError{ + DeploymentName: "agentvm", + ResourceGroup: "rg1", + TopError: fmt.Errorf("sample error"), + ProvisioningState: "Failed", + Response: []byte("sample resp"), + StatusCode: 500, + OperationsLists: operationsLists, + } + errString := deploymentErr.Error() + expected := `DeploymentName[agentvm] ResourceGroup[rg1] TopError[sample error] StatusCode[500] Response[sample resp] ProvisioningState[Failed] Operations[{ + "message": "sample status message" +}]` + if 
errString != expected { + t.Errorf("expected error with message %s, but got %s", expected, errString) + } +} diff --git a/pkg/armhelpers/mockclients.go b/pkg/armhelpers/mockclients.go index b4820a9f23..1438b03cf3 100644 --- a/pkg/armhelpers/mockclients.go +++ b/pkg/armhelpers/mockclients.go @@ -23,6 +23,7 @@ type MockACSEngineClient struct { FailDeployTemplate bool FailDeployTemplateQuota bool FailDeployTemplateConflict bool + FailDeployTemplateWithProperties bool FailEnsureResourceGroup bool FailListVirtualMachines bool FailListVirtualMachineScaleSets bool @@ -187,6 +188,29 @@ func (mc *MockACSEngineClient) DeployTemplate(resourceGroup, name string, templa }}}, errors.New(errmsg) + case mc.FailDeployTemplateWithProperties: + errmsg := `resources.DeploymentsClient#CreateOrUpdate: Failure sending request: StatusCode=200 -- Original Error: Long running operation terminated with status 'Failed': Code="DeploymentFailed" Message="At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/arm-debug for usage details.` + resp := `{ +"status":"Failed", +"error":{ + "code":"DeploymentFailed", + "message":"At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/arm-debug for usage details.", + "details":[{ + "code":"Conflict", + "message":"{\r\n \"error\": {\r\n \"code\": \"PropertyChangeNotAllowed\",\r\n \"target\": \"dataDisk.createOption\",\r\n \"message\": \"Changing property 'dataDisk.createOption' is not allowed.\"\r\n }\r\n}" +}]}}` + provisioningState := "Failed" + return &resources.DeploymentExtended{ + Response: autorest.Response{ + Response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(resp))), + }}, + Properties: &resources.DeploymentPropertiesExtended{ + ProvisioningState: &provisioningState, + }}, + errors.New(errmsg) default: return nil, nil } @@ -541,7 +565,40 @@ func (mc *MockACSEngineClient) ListProviders() (resources.ProviderListResult, er // ListDeploymentOperations gets all deployments operations for a deployment. func (mc *MockACSEngineClient) ListDeploymentOperations(resourceGroupName string, deploymentName string, top *int32) (result resources.DeploymentOperationsListResult, err error) { - return resources.DeploymentOperationsListResult{}, nil + resp := `{ + "properties": { + "provisioningState":"Failed", + "correlationId":"d5062e45-6e9f-4fd3-a0a0-6b2c56b15757", + "error":{ + "code":"DeploymentFailed","message":"At least one resource deployment operation failed. Please list deployment operations for details. 
Please see http://aka.ms/arm-debug for usage details.", + "details":[{"code":"Conflict","message":"{\r\n \"error\": {\r\n \"message\": \"Conflict\",\r\n \"code\": \"Conflict\"\r\n }\r\n}"}] + } + } +}` + + provisioningState := "Failed" + id := "00000000" + operationID := "d5062e45-6e9f-4fd3-a0a0-6b2c56b15757" + nextLink := fmt.Sprintf("https://management.azure.com/subscriptions/11111/resourcegroups/%s/deployments/%s/operations?$top=%s&api-version=2018-02-01", resourceGroupName, deploymentName, "5") + return resources.DeploymentOperationsListResult{ + Response: autorest.Response{ + Response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(resp))), + }, + }, + Value: &[]resources.DeploymentOperation{ + { + ID: &id, + OperationID: &operationID, + Properties: &resources.DeploymentOperationProperties{ + ProvisioningState: &provisioningState, + }, + }, + }, + NextLink: &nextLink, + }, nil } // ListDeploymentOperationsNextResults retrieves the next set of results, if any. diff --git a/pkg/armhelpers/utils/util.go b/pkg/armhelpers/utils/util.go index 05f9cb69fe..348cdb586f 100644 --- a/pkg/armhelpers/utils/util.go +++ b/pkg/armhelpers/utils/util.go @@ -120,10 +120,6 @@ func WindowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIn agentIndex, _ = strconv.Atoi(poolInfo[3:]) fmt.Printf("%d\n", agentIndex) - if err != nil { - return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err) - } - return poolPrefix, acsStr, poolIndex, agentIndex, nil } diff --git a/pkg/armhelpers/utils/util_test.go b/pkg/armhelpers/utils/util_test.go index eca047f87b..e4feb42b3c 100644 --- a/pkg/armhelpers/utils/util_test.go +++ b/pkg/armhelpers/utils/util_test.go @@ -157,16 +157,42 @@ func Test_GetK8sVMName(t *testing.T) { nameSuffix, agentPoolName string agentPoolIndex, agentIndex int expected string + expectedErr bool }{ - {api.Linux, true, "35953384", "agentpool1", 0, 2, "aks-agentpool1-35953384-2"}, - {api.Windows, false, "35953384", "agentpool1", 0, 2, "35953k8s9002"}, + {api.Linux, true, "35953384", "agentpool1", 0, 2, "aks-agentpool1-35953384-2", false}, + {api.Windows, false, "35953384", "agentpool1", 0, 2, "35953k8s9002", false}, + {"macOS", false, "35953384", "agentpool1", 0, 2, "", true}, } { vmName, err := GetK8sVMName(s.osType, s.isAKS, s.nameSuffix, s.agentPoolName, s.agentPoolIndex, s.agentIndex) - if err != nil { - t.Fatalf("unexpected error: %s", err) + + if !s.expectedErr { + if err != nil { + t.Fatalf("unexpected error: %s", err) + } } if vmName != s.expected { t.Fatalf("vmName %s, expected %s", vmName, s.expected) } } } + +func Test_ResourceName(t *testing.T) { + s := "https://vhdstorage8h8pjybi9hbsl6.blob.core.windows.net/vhds/osdisks/disk1234.vhd" + expected := "disk1234.vhd" + r, err := ResourceName(s) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if r != expected { + t.Fatalf("resourceName %s, expected %s", r, expected) + } +} + +func Test_ResourceNameInvalid(t *testing.T) { + s := "https://vhdstorage8h8pjybi9hbsl6.blob.core.windows.net/vhds/osdisks/" + expectedMsg := "resource name was missing from identifier" + _, err := ResourceName(s) + if err == nil || err.Error() != expectedMsg { + t.Fatalf("expected error with message: %s", expectedMsg) + } +} diff --git a/pkg/helpers/helpers_test.go b/pkg/helpers/helpers_test.go index 00e7ecaf9a..a9b6765c5f 100644 --- a/pkg/helpers/helpers_test.go +++ b/pkg/helpers/helpers_test.go @@ -30,7 +30,7 @@ func TestJSONMarshal(t *testing.T) { } } -func 
TestNormalizaAzureRegion(t *testing.T) { +func TestNormalizeAzureRegion(t *testing.T) { cases := []struct { input string expectedResult string From 8f045c6a9c6f5af5ba31d774773a045bf574bd94 Mon Sep 17 00:00:00 2001 From: Taylor Thomas Date: Mon, 25 Jun 2018 15:06:05 -0700 Subject: [PATCH 25/74] fix(addons): Adds missing cluster subnet substitution for flannel (#3356) The flannel configmap was using a hardcoded value for the cluster subnet, so when you used a custom one, it would not route traffic properly --- .../k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml | 2 +- parts/k8s/kubernetesmastercustomdata.yml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml index 2b4fa22b5b..3180d73e51 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml @@ -29,7 +29,7 @@ data: } net-conf.json: | { - "Network": "10.244.0.0/16", + "Network": "", "Backend": { "Type": "vxlan" } diff --git a/parts/k8s/kubernetesmastercustomdata.yml b/parts/k8s/kubernetesmastercustomdata.yml index 9a23f98f95..44b4d0285a 100644 --- a/parts/k8s/kubernetesmastercustomdata.yml +++ b/parts/k8s/kubernetesmastercustomdata.yml @@ -263,6 +263,10 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER # If Calico Policy enabled then update Cluster Cidr sed -i "s||{{WrapAsVariable "kubeClusterCidr"}}|g" "/etc/kubernetes/addons/calico-daemonset.yaml" {{end}} +{{if eq .OrchestratorProfile.KubernetesConfig.NetworkPlugin "flannel"}} + # If Flannel is enabled then update Cluster Cidr + sed -i "s||{{WrapAsVariable "kubeClusterCidr"}}|g" "/etc/kubernetes/addons/flannel-daemonset.yaml" +{{end}} {{if eq .OrchestratorProfile.KubernetesConfig.NetworkPolicy "cilium"}} # If Cilium Policy enabled then update the etcd certs and address sed -i "s||{{WrapAsVerbatim "variables('masterEtcdClientURLs')[copyIndex(variables('masterOffset'))]"}}|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" From 0a9fdcd510d4384289804101fd3d5fdb2c10739c Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 25 Jun 2018 15:33:06 -0700 Subject: [PATCH 26/74] Add support for Kubernetes v1.11.0-rc.2 (#3357) --- .circleci/config.yml | 72 -------------------------------------- pkg/api/common/versions.go | 1 + 2 files changed, 1 insertion(+), 72 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 77a23304ca..29a945e62d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -153,30 +153,6 @@ jobs: path: /go/src/github.com/Azure/acs-engine/_logs - store_artifacts: path: /go/src/github.com/Azure/acs-engine/_output - k8s-windows-1.7-release-e2e: - <<: *defaults - steps: - - checkout - - run: | - echo 'export TIMEOUT=30m' >> $BASH_ENV - echo 'export ORCHESTRATOR_RELEASE=1.7' >> $BASH_ENV - echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/windows/definition.json' >> $BASH_ENV - echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV - echo 'export RETAIN_SSH=false' >> $BASH_ENV - echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export CLIENT_ID=${SERVICE_PRINCIPAL_CLIENT_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET_E2E_KUBERNETES}' >> $BASH_ENV - - run: - name: compile - command: make build-binary - - run: - name: ginkgo k8s windows e2e tests - command: make test-kubernetes - no_output_timeout: "30m" - - 
store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_logs - - store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_output k8s-1.9-release-e2e: <<: *defaults steps: @@ -284,30 +260,6 @@ jobs: path: /go/src/github.com/Azure/acs-engine/_logs - store_artifacts: path: /go/src/github.com/Azure/acs-engine/_output - k8s-windows-1.8-release-e2e: - <<: *defaults - steps: - - checkout - - run: | - echo 'export TIMEOUT=30m' >> $BASH_ENV - echo 'export ORCHESTRATOR_RELEASE=1.8' >> $BASH_ENV - echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/windows/definition.json' >> $BASH_ENV - echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV - echo 'export RETAIN_SSH=false' >> $BASH_ENV - echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export CLIENT_ID=${SERVICE_PRINCIPAL_CLIENT_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET_E2E_KUBERNETES}' >> $BASH_ENV - - run: - name: compile - command: make build-binary - - run: - name: ginkgo k8s windows e2e tests - command: make test-kubernetes - no_output_timeout: "30m" - - store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_logs - - store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_output k8s-windows-1.9-release-e2e: <<: *defaults steps: @@ -503,24 +455,12 @@ workflows: filters: branches: ignore: master - - k8s-windows-1.7-release-e2e: - requires: - - pr-e2e-hold - filters: - branches: - ignore: master - k8s-1.8-release-e2e: requires: - pr-e2e-hold filters: branches: ignore: master - - k8s-windows-1.8-release-e2e: - requires: - - pr-e2e-hold - filters: - branches: - ignore: master - k8s-windows-1.9-release-e2e: requires: - pr-e2e-hold @@ -605,24 +545,12 @@ workflows: filters: branches: only: master - - k8s-windows-1.7-release-e2e: - requires: - - test - filters: - branches: - only: master - k8s-1.8-release-e2e: requires: - test filters: branches: only: master - - k8s-windows-1.8-release-e2e: - requires: - - test - filters: - branches: - only: master - k8s-windows-1.9-release-e2e: requires: - test diff --git a/pkg/api/common/versions.go b/pkg/api/common/versions.go index 6aa593ad69..ad6ceb4c22 100644 --- a/pkg/api/common/versions.go +++ b/pkg/api/common/versions.go @@ -64,6 +64,7 @@ var AllKubernetesSupportedVersions = map[string]bool{ "1.11.0-beta.1": true, "1.11.0-beta.2": true, "1.11.0-rc.1": true, + "1.11.0-rc.2": true, } // GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release From 2ce038639d68bd5837a7a924450d1c329fb13951 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Mon, 25 Jun 2018 16:02:20 -0700 Subject: [PATCH 27/74] fix nil panic (#3359) --- docs/clusterdefinition.md | 3 +++ pkg/api/vlabs/validate.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 0a3bc5ad33..38ef437599 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -113,6 +113,7 @@ More usefully, let's add some custom configuration to the above addons: "addons": [ { "name": "tiller", + "enabled": true, "containers": [ { "name": "tiller", @@ -126,6 +127,7 @@ More usefully, let's add some custom configuration to the above addons: }, { "name": "kubernetes-dashboard", + "enabled": true, "containers": [ { "name": "kubernetes-dashboard", @@ -138,6 +140,7 @@ More usefully, let's add some custom configuration to the above addons: }, { "name": 
"cluster-autoscaler", + "enabled": true, "containers": [ { "name": "cluster-autoscaler", diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 44a94220c9..831749b883 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -409,7 +409,7 @@ func (a *Properties) validateAddons() error { for _, addon := range a.OrchestratorProfile.KubernetesConfig.Addons { switch addon.Name { case "cluster-autoscaler": - if *addon.Enabled && isAvailabilitySets { + if helpers.IsTrueBoolPointer(addon.Enabled) && isAvailabilitySets { return fmt.Errorf("Cluster Autoscaler add-on can only be used with VirtualMachineScaleSets. Please specify \"availabilityProfile\": \"%s\"", VirtualMachineScaleSets) } case "nvidia-device-plugin": From d18ad7de6e5d7f627e05852b5ee198e1fee22abf Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 25 Jun 2018 16:12:14 -0700 Subject: [PATCH 28/74] add a basic DNS livenessprobe test (#3346) --- test/e2e/kubernetes/kubernetes_test.go | 22 +++++++++++++++++++ test/e2e/kubernetes/pod/pod.go | 21 +++++++++++++----- .../kubernetes/workloads/dns-liveness.yaml | 21 ++++++++++++++++++ 3 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 test/e2e/kubernetes/workloads/dns-liveness.yaml diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 1cdbe1b509..757fe9a3e3 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -66,6 +66,12 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu It("should have functional DNS", func() { if !eng.HasWindowsAgents() { + pod, err := pod.CreatePodFromFile(filepath.Join(WorkloadDir, "dns-liveness.yaml"), "dns-liveness", "default") + Expect(err).NotTo(HaveOccurred()) + running, err := pod.WaitOnReady(5*time.Second, 2*time.Minute) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + kubeConfig, err := GetConfig() Expect(err).NotTo(HaveOccurred()) master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) @@ -826,4 +832,20 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } })*/ }) + + Describe("after the cluster has been up for awhile", func() { + It("dns-liveness pod should not have any restarts", func() { + if !eng.HasWindowsAgents() { + pod, err := pod.Get("dns-liveness", "default") + Expect(err).NotTo(HaveOccurred()) + running, err := pod.WaitOnReady(5*time.Second, 3*time.Minute) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + restarts := pod.Status.ContainerStatuses[0].RestartCount + err = pod.Delete() + Expect(err).NotTo(HaveOccurred()) + Expect(restarts).To(Equal(0)) + } + }) + }) }) diff --git a/test/e2e/kubernetes/pod/pod.go b/test/e2e/kubernetes/pod/pod.go index 6713b4d05e..924c265cea 100644 --- a/test/e2e/kubernetes/pod/pod.go +++ b/test/e2e/kubernetes/pod/pod.go @@ -52,6 +52,16 @@ type Container struct { Resources Resources `json:"resources"` } +// ContainerStatus has status of a container +type ContainerStatus struct { + ContainerID string `json:"containerID"` + Image string `json:"image"` + ImageID string `json:"imageID"` + Name string `json:"name"` + Ready bool `json:"ready"` + RestartCount int `json:"restartCount"` +} + // EnvVar holds environment variables type EnvVar struct { Name string `json:"name"` @@ -84,10 +94,11 @@ type Limits struct { // Status holds information like hostIP and phase type Status struct { - HostIP string `json:"hostIP"` - Phase string `json:"phase"` - PodIP string 
`json:"podIP"` - StartTime time.Time `json:"startTime"` + HostIP string `json:"hostIP"` + Phase string `json:"phase"` + PodIP string `json:"podIP"` + StartTime time.Time `json:"startTime"` + ContainerStatuses []ContainerStatus `json:"containerStatuses"` } // CreatePodFromFile will create a Pod from file with a name @@ -170,7 +181,7 @@ func AreAllPodsRunning(podPrefix, namespace string) (bool, error) { var status []bool for _, pod := range pl.Pods { - matched, err := regexp.MatchString(podPrefix+"-.*", pod.Metadata.Name) + matched, err := regexp.MatchString(podPrefix, pod.Metadata.Name) if err != nil { log.Printf("Error trying to match pod name:%s\n", err) return false, err diff --git a/test/e2e/kubernetes/workloads/dns-liveness.yaml b/test/e2e/kubernetes/workloads/dns-liveness.yaml new file mode 100644 index 0000000000..7cf9345d63 --- /dev/null +++ b/test/e2e/kubernetes/workloads/dns-liveness.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness + name: dns-liveness +spec: + containers: + - name: dns-liveness + image: k8s.gcr.io/busybox + args: + - /bin/sh + - -c + - while true; do sleep 600; done + livenessProbe: + exec: + command: + - nslookup + - bbc.co.uk + initialDelaySeconds: 5 + periodSeconds: 5 \ No newline at end of file From ec39e30a4a5b97c78dbf397291598cf946233a34 Mon Sep 17 00:00:00 2001 From: Madhan Raj Mookkandy Date: Mon, 25 Jun 2018 16:29:24 -0700 Subject: [PATCH 29/74] Fix for kubelet startup script when using azure cni (#3301) --- parts/k8s/kuberneteswindowssetup.ps1 | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/parts/k8s/kuberneteswindowssetup.ps1 b/parts/k8s/kuberneteswindowssetup.ps1 index 704d4da9c6..0c4b30971c 100644 --- a/parts/k8s/kuberneteswindowssetup.ps1 +++ b/parts/k8s/kuberneteswindowssetup.ps1 @@ -360,6 +360,24 @@ Write-Host "NetworkPlugin azure, starting kubelet." # Turn off Firewall to enable pods to talk to service endpoints. (Kubelet should eventually do this) netsh advfirewall set allprofiles state off +# startup the service + +`$hnsNetwork = Get-HnsNetwork | ? Type -EQ `$global:NetworkMode.ToLower() + +if (`$hnsNetwork) +{ + # Kubelet has been restarted with existing network. + # Cleanup all containers + docker ps -q | foreach {docker rm `$_ -f} + # cleanup network + Write-Host "Cleaning up old HNS network found" + Remove-HnsNetwork `$hnsNetwork + Start-Sleep 10 + `$cnijson = "$global:KubeDir" + "\azure-vnet*" + remove-item `$cnijson -ErrorAction SilentlyContinue +} + +Restart-Service Kubeproxy $KubeletCommandLine From 5db2c58949d51d72eae90f60bd8226b370c63e2b Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 25 Jun 2018 17:21:36 -0700 Subject: [PATCH 30/74] update ubuntu image (#3361) --- pkg/acsengine/defaults.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index 9c814debc2..2edb9318b3 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -67,7 +67,7 @@ var ( ImageOffer: "UbuntuServer", ImageSku: "16.04-LTS", ImagePublisher: "Canonical", - ImageVersion: "16.04.201806120", + ImageVersion: "16.04.201806220", } //DefaultRHELOSImageConfig is the RHEL Linux distribution. 
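PATCH 27 above ("fix nil panic") changes only one conditional, so it is easy to miss why it matters: addon.Enabled is a *bool that is nil whenever an addon entry omits "enabled", and the old validation loop dereferenced it unconditionally. Below is a minimal, self-contained sketch of the failure mode and of the guard pattern the fix switches to; the local isTrueBoolPointer here is a stand-in whose semantics (nil is treated as false) are assumed from how helpers.IsTrueBoolPointer is used in that diff, not copied from the library.

package main

import "fmt"

// isTrueBoolPointer is a sketch of the guard used in the PATCH 27 fix:
// it treats a nil *bool as false instead of dereferencing it.
func isTrueBoolPointer(b *bool) bool {
	return b != nil && *b
}

type addon struct {
	Enabled *bool // optional field: nil when the user omits "enabled" in the addon config
}

func main() {
	a := addon{} // Enabled left nil, as in an addon entry without "enabled"

	// The pre-fix code dereferenced the pointer directly:
	//   if *addon.Enabled && isAvailabilitySets { ... }
	// which panics with "invalid memory address or nil pointer dereference"
	// whenever Enabled is nil.

	// The guarded form is safe for nil pointers:
	if isTrueBoolPointer(a.Enabled) {
		fmt.Println("addon enabled")
	} else {
		fmt.Println("addon disabled or unset")
	}
}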
From af25292cb9006ec0f071c555867e1ae6a841caca Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 25 Jun 2018 17:23:49 -0700 Subject: [PATCH 31/74] Add AKS-Engine billing extensions (#3358) --- parts/k8s/kubernetesagentresourcesvmas.t | 22 +++++++++++++++++++++ parts/k8s/kubernetesmasterresources.t | 21 ++++++++++++++++++++ parts/k8s/kuberneteswinagentresourcesvmas.t | 21 ++++++++++++++++++++ 3 files changed, 64 insertions(+) diff --git a/parts/k8s/kubernetesagentresourcesvmas.t b/parts/k8s/kubernetesagentresourcesvmas.t index f2977f339e..ee939fbf1d 100644 --- a/parts/k8s/kubernetesagentresourcesvmas.t +++ b/parts/k8s/kubernetesagentresourcesvmas.t @@ -324,4 +324,26 @@ {{end}} } } + }, + { + "type": "Microsoft.Compute/virtualMachines/extensions", + "name": "[concat(variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')), '/computeAksLinuxBilling')]", + "apiVersion": "[variables('apiVersionDefault')]", + "copy": { + "count": "[sub(variables('{{.Name}}Count'), variables('{{.Name}}Offset'))]", + "name": "vmLoopNode" + }, + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Compute/virtualMachines/', variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')))]" + ], + "properties": { + "publisher": "Microsoft.AKS", + "type": {{if IsHostedMaster}}"Compute.AKS.Linux.Billing"{{else}}"Compute.AKS-Engine.Linux.Billing"{{end}}, + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": { + } + } } + diff --git a/parts/k8s/kubernetesmasterresources.t b/parts/k8s/kubernetesmasterresources.t index 399bb4826a..db7dfaf757 100644 --- a/parts/k8s/kubernetesmasterresources.t +++ b/parts/k8s/kubernetesmasterresources.t @@ -895,4 +895,25 @@ {{end}} } } + }, + { + "type": "Microsoft.Compute/virtualMachines/extensions", + "name": "[concat(variables('masterVMNamePrefix'), copyIndex(variables('masterOffset')), '/computeAksLinuxBilling')]", + "apiVersion": "[variables('apiVersionDefault')]", + "copy": { + "count": "[sub(variables('masterCount'), variables('masterOffset'))]", + "name": "vmLoopNode" + }, + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Compute/virtualMachines/', variables('masterVMNamePrefix'), copyIndex(variables('masterOffset')))]" + ], + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Linux.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": { + } + } }{{WriteLinkedTemplatesForExtensions}} diff --git a/parts/k8s/kuberneteswinagentresourcesvmas.t b/parts/k8s/kuberneteswinagentresourcesvmas.t index c7036f7619..4ae8bbeb97 100644 --- a/parts/k8s/kuberneteswinagentresourcesvmas.t +++ b/parts/k8s/kuberneteswinagentresourcesvmas.t @@ -274,4 +274,25 @@ "commandToExecute": "[concat('powershell.exe -ExecutionPolicy Unrestricted -command \"', '$arguments = ', variables('singleQuote'),'-MasterIP ',variables('kubernetesAPIServerIP'),' -KubeDnsServiceIp ',variables('kubeDnsServiceIp'),' -MasterFQDNPrefix ',variables('masterFqdnPrefix'),' -Location ',variables('location'),' -AgentKey ',variables('clientPrivateKey'),' -AADClientId ',variables('servicePrincipalClientId'),' -AADClientSecret ',variables('servicePrincipalClientSecret'),variables('singleQuote'), ' ; ', variables('windowsCustomScriptSuffix'), '\" > %SYSTEMDRIVE%\\AzureData\\CustomDataSetupScript.log 2>&1')]" } } + }, + { + "type": "Microsoft.Compute/virtualMachines/extensions", + "name": "[concat(variables('{{.Name}}VMNamePrefix'), 
copyIndex(variables('{{.Name}}Offset')), '/computeAksLinuxBilling')]", + "apiVersion": "[variables('apiVersionDefault')]", + "copy": { + "count": "[sub(variables('{{.Name}}Count'), variables('{{.Name}}Offset'))]", + "name": "vmLoopNode" + }, + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Compute/virtualMachines/', variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')))]" + ], + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Windows.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": { + } + } } \ No newline at end of file From 9f1023ec596590a7c43d00fb4d3cb0ae3cd47697 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 25 Jun 2018 19:12:47 -0700 Subject: [PATCH 32/74] test for DNS failures before calico (#3365) because calico test disables outbound internet --- test/e2e/kubernetes/kubernetes_test.go | 32 +++++++++++++------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 757fe9a3e3..1a964f1319 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -670,6 +670,22 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu }) }) + Describe("after the cluster has been up for awhile", func() { + It("dns-liveness pod should not have any restarts", func() { + if !eng.HasWindowsAgents() { + pod, err := pod.Get("dns-liveness", "default") + Expect(err).NotTo(HaveOccurred()) + running, err := pod.WaitOnReady(5*time.Second, 3*time.Minute) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + restarts := pod.Status.ContainerStatuses[0].RestartCount + err = pod.Delete() + Expect(err).NotTo(HaveOccurred()) + Expect(restarts).To(Equal(0)) + } + }) + }) + Describe("with calico network policy enabled", func() { It("should apply a network policy and deny outbound internet access to nginx pod", func() { if eng.HasNetworkPolicy("calico") { @@ -832,20 +848,4 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } })*/ }) - - Describe("after the cluster has been up for awhile", func() { - It("dns-liveness pod should not have any restarts", func() { - if !eng.HasWindowsAgents() { - pod, err := pod.Get("dns-liveness", "default") - Expect(err).NotTo(HaveOccurred()) - running, err := pod.WaitOnReady(5*time.Second, 3*time.Minute) - Expect(err).NotTo(HaveOccurred()) - Expect(running).To(Equal(true)) - restarts := pod.Status.ContainerStatuses[0].RestartCount - err = pod.Delete() - Expect(err).NotTo(HaveOccurred()) - Expect(restarts).To(Equal(0)) - } - }) - }) }) From ae020e3e0349b2b9e53beb484c9b55255bef8091 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Tue, 26 Jun 2018 10:10:37 -0700 Subject: [PATCH 33/74] =?UTF-8?q?don=E2=80=99t=20run=20dns=20livenessprobe?= =?UTF-8?q?=20test=20w/=20calico=20(#3370)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/e2e/kubernetes/kubernetes_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 1a964f1319..44f35b534c 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -66,11 +66,13 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu It("should have functional DNS", func() { if 
!eng.HasWindowsAgents() { - pod, err := pod.CreatePodFromFile(filepath.Join(WorkloadDir, "dns-liveness.yaml"), "dns-liveness", "default") - Expect(err).NotTo(HaveOccurred()) - running, err := pod.WaitOnReady(5*time.Second, 2*time.Minute) - Expect(err).NotTo(HaveOccurred()) - Expect(running).To(Equal(true)) + if !eng.HasNetworkPolicy("calico") { + pod, err := pod.CreatePodFromFile(filepath.Join(WorkloadDir, "dns-liveness.yaml"), "dns-liveness", "default") + Expect(err).NotTo(HaveOccurred()) + running, err := pod.WaitOnReady(5*time.Second, 2*time.Minute) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + } kubeConfig, err := GetConfig() Expect(err).NotTo(HaveOccurred()) @@ -672,7 +674,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu Describe("after the cluster has been up for awhile", func() { It("dns-liveness pod should not have any restarts", func() { - if !eng.HasWindowsAgents() { + if !eng.HasWindowsAgents() && !eng.HasNetworkPolicy("calico") { pod, err := pod.Get("dns-liveness", "default") Expect(err).NotTo(HaveOccurred()) running, err := pod.WaitOnReady(5*time.Second, 3*time.Minute) From c802da277e377ec9dbe80dc34110fb4cb0f44db7 Mon Sep 17 00:00:00 2001 From: Jim Minter Date: Tue, 26 Jun 2018 13:23:17 -0500 Subject: [PATCH 34/74] openshift: add SecurityGroupName and PrimaryAvailabilitySetName to azure.conf (#3363) --- pkg/api/vlabs/validate.go | 14 +++ pkg/api/vlabs/validate_test.go | 90 +++++++++++++++++++ pkg/openshift/certgen/release39/config.go | 14 +-- pkg/openshift/certgen/release39/defaults.go | 14 +-- .../certgen/release39/templates/bindata.go | 2 +- .../templates/node/etc/azure/azure.conf | 2 + pkg/openshift/certgen/unstable/config.go | 14 +-- pkg/openshift/certgen/unstable/defaults.go | 14 +-- .../certgen/unstable/templates/bindata.go | 2 +- .../node/etc/origin/cloudprovider/azure.conf | 2 + 10 files changed, 142 insertions(+), 26 deletions(-) diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 831749b883..b0ad0810ab 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "net/url" + "reflect" "regexp" "strings" "time" @@ -377,10 +378,23 @@ func (a *Properties) validateAgentPoolProfiles() error { } } + if a.OrchestratorProfile.OrchestratorType == OpenShift { + if (agentPoolProfile.Name == "infra") != (agentPoolProfile.Role == "infra") { + return fmt.Errorf("OpenShift requires that the 'infra' agent pool profile, and no other, should have role 'infra'") + } + } + if e := agentPoolProfile.validateWindows(a.OrchestratorProfile, a.WindowsProfile); agentPoolProfile.OSType == Windows && e != nil { return e } } + + if a.OrchestratorProfile.OrchestratorType == OpenShift { + if !reflect.DeepEqual(profileNames, map[string]bool{"compute": true, "infra": true}) { + return fmt.Errorf("OpenShift requires exactly two agent pool profiles: compute and infra") + } + } + return nil } diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index b26cc5f973..5699c88ae1 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -1197,6 +1197,14 @@ func TestOpenshiftValidate(t *testing.T) { StorageProfile: ManagedDisks, AvailabilityProfile: AvailabilitySet, }, + { + Name: "infra", + Role: "infra", + Count: 1, + VMSize: "Standard_D4s_v3", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, }, LinuxProfile: &LinuxProfile{ AdminUsername: "admin", @@ -1316,3 +1324,85 @@ func validOpenShiftConifg() 
*OpenShiftConfig { ClusterPassword: "bar", } } + +func TestValidateAgentPoolProfiles(t *testing.T) { + tests := []struct { + name string + properties *Properties + expectedErr error + }{ + { + name: "valid", + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: OpenShift, + }, + AgentPoolProfiles: []*AgentPoolProfile{ + { + Name: "compute", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + { + Name: "infra", + Role: "infra", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + }, + }, + expectedErr: nil, + }, + { + name: "invalid - role wrong", + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: OpenShift, + }, + AgentPoolProfiles: []*AgentPoolProfile{ + { + Name: "compute", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + { + Name: "infra", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + }, + }, + expectedErr: errors.New("OpenShift requires that the 'infra' agent pool profile, and no other, should have role 'infra'"), + }, + { + name: "invalid - profiles misnamed", + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: OpenShift, + }, + AgentPoolProfiles: []*AgentPoolProfile{ + { + Name: "bad", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + { + Name: "infra", + Role: "infra", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + }, + }, + expectedErr: errors.New("OpenShift requires exactly two agent pool profiles: compute and infra"), + }, + } + + for _, test := range tests { + gotErr := test.properties.validateAgentPoolProfiles() + if !reflect.DeepEqual(test.expectedErr, gotErr) { + t.Logf("running scenario %q", test.name) + t.Errorf("expected error: %v\ngot error: %v", test.expectedErr, gotErr) + } + } +} diff --git a/pkg/openshift/certgen/release39/config.go b/pkg/openshift/certgen/release39/config.go index 6d129c8dee..ea6167d238 100644 --- a/pkg/openshift/certgen/release39/config.go +++ b/pkg/openshift/certgen/release39/config.go @@ -27,12 +27,14 @@ type Config struct { // AzureConfig represents the azure.conf configuration type AzureConfig struct { - TenantID string - SubscriptionID string - AADClientID string - AADClientSecret string - ResourceGroup string - Location string + TenantID string + SubscriptionID string + AADClientID string + AADClientSecret string + ResourceGroup string + Location string + SecurityGroupName string + PrimaryAvailabilitySetName string } // Master represents an OpenShift master configuration diff --git a/pkg/openshift/certgen/release39/defaults.go b/pkg/openshift/certgen/release39/defaults.go index e5b26852fd..19d6ed7af9 100644 --- a/pkg/openshift/certgen/release39/defaults.go +++ b/pkg/openshift/certgen/release39/defaults.go @@ -30,12 +30,14 @@ func OpenShiftSetDefaultCerts(a *api.Properties, orchestratorName, clusterID str ClusterPassword: a.OrchestratorProfile.OpenShiftConfig.ClusterPassword, EnableAADAuthentication: a.OrchestratorProfile.OpenShiftConfig.EnableAADAuthentication, AzureConfig: AzureConfig{ - TenantID: a.AzProfile.TenantID, - SubscriptionID: a.AzProfile.SubscriptionID, - AADClientID: a.ServicePrincipalProfile.ClientID, - AADClientSecret: a.ServicePrincipalProfile.Secret, - ResourceGroup: a.AzProfile.ResourceGroup, - Location: a.AzProfile.Location, + TenantID: a.AzProfile.TenantID, + SubscriptionID: a.AzProfile.SubscriptionID, + AADClientID: 
a.ServicePrincipalProfile.ClientID, + AADClientSecret: a.ServicePrincipalProfile.Secret, + ResourceGroup: a.AzProfile.ResourceGroup, + Location: a.AzProfile.Location, + SecurityGroupName: fmt.Sprintf("%s-master-%s-nsg", orchestratorName, clusterID), + PrimaryAvailabilitySetName: fmt.Sprintf("compute-availabilityset-%s", clusterID), }, } diff --git a/pkg/openshift/certgen/release39/templates/bindata.go b/pkg/openshift/certgen/release39/templates/bindata.go index c77725467b..0822a90a70 100644 --- a/pkg/openshift/certgen/release39/templates/bindata.go +++ b/pkg/openshift/certgen/release39/templates/bindata.go @@ -299,7 +299,7 @@ func masterTmpBootstrapconfigsMasterConfigYaml() (*asset, error) { return a, nil } -var _nodeEtcAzureAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2a\x49\xcd\x4b\xcc\x2b\xf1\x4c\xb1\x52\xa8\xae\x56\xd0\x73\xac\x2a\x2d\x4a\x75\xce\xcf\x4b\xcb\x4c\xd7\x0b\x81\xc8\xb8\x28\xd4\xd6\x72\x15\x97\x26\x15\x27\x17\x65\x16\x94\x64\xe6\xe7\x61\x53\x1b\x8c\x2c\x0f\xd6\x91\x98\x98\xe2\x9c\x93\x99\x8a\xdd\x68\x47\x47\x17\xa8\x24\xaa\xda\xe0\xd4\xe4\xa2\xd4\x12\x3c\xea\x21\x0a\xa0\x7a\x42\x88\x72\x7a\x51\x6a\x71\x7e\x69\x51\x72\xaa\x7b\x51\x7e\x69\x01\xa6\xd2\x20\x64\x69\x90\xfa\x9c\xfc\xe4\x44\x90\x37\x30\x95\xfa\x40\x65\x40\xaa\x00\x01\x00\x00\xff\xff\x69\xfe\xce\x7d\x37\x01\x00\x00") +var _nodeEtcAzureAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xd0\x51\x0a\x82\x40\x10\x06\xe0\xf7\x4e\xe1\x09\x3c\x40\x6f\x8b\x42\x04\x11\x91\x5e\x60\x5c\xa7\x18\x58\x77\x65\x9c\x0d\x4c\xbc\x7b\xac\xfa\x60\xb9\x45\xcf\xff\xf7\xff\xec\x8e\xa0\x05\x2b\xc7\x7a\x9f\x0c\x43\x92\xaa\xa7\x67\xcc\x9c\xbd\xd1\x3d\x2d\xe7\x24\x4f\xc6\x71\xd7\xf9\xaa\xd3\x4c\xad\x90\xb3\x31\x5b\xac\xf3\xa9\x01\x50\x67\x86\x30\x3e\xad\x54\xbe\x84\xef\xb6\x40\xcd\x28\x3f\xfc\x0c\x96\x4e\xf9\xd7\xd3\x19\x3b\xe7\x59\xe3\x81\x9d\x6f\xb7\xf4\xba\x8e\x83\x37\x4e\x43\xf8\xc6\x96\x9e\x96\x64\x3a\x08\x6a\xcf\x24\xfd\x54\x3b\x43\x83\x91\x9b\x7c\x92\xd0\x6b\x99\x1a\xe0\x5e\x3d\x80\x0c\x54\x64\x48\xfa\x02\x25\x3e\x70\xf9\x6a\xc3\xd2\x2b\x00\x00\xff\xff\x0d\x19\xb3\x4a\xb9\x01\x00\x00") func nodeEtcAzureAzureConfBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf b/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf index 5241e2afa2..fa3d075114 100644 --- a/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf +++ b/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf @@ -5,3 +5,5 @@ aadClientSecret: {{ .AzureConfig.AADClientSecret }} aadTenantId: {{ .AzureConfig.TenantID }} resourceGroup: {{ .AzureConfig.ResourceGroup }} location: {{ .AzureConfig.Location }} +securityGroupName: {{ .AzureConfig.SecurityGroupName }} +primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName }} diff --git a/pkg/openshift/certgen/unstable/config.go b/pkg/openshift/certgen/unstable/config.go index e5c9b1c716..f7fe5d8739 100644 --- a/pkg/openshift/certgen/unstable/config.go +++ b/pkg/openshift/certgen/unstable/config.go @@ -27,12 +27,14 @@ type Config struct { // AzureConfig represents the azure.conf configuration type AzureConfig struct { - TenantID string - SubscriptionID string - AADClientID string - AADClientSecret string - ResourceGroup string - Location string + TenantID string + SubscriptionID string + AADClientID string + AADClientSecret string + ResourceGroup string + Location string + SecurityGroupName string + PrimaryAvailabilitySetName string } // Master represents an OpenShift master 
configuration diff --git a/pkg/openshift/certgen/unstable/defaults.go b/pkg/openshift/certgen/unstable/defaults.go index cfdc6aafe2..a6382718e4 100644 --- a/pkg/openshift/certgen/unstable/defaults.go +++ b/pkg/openshift/certgen/unstable/defaults.go @@ -30,12 +30,14 @@ func OpenShiftSetDefaultCerts(a *api.Properties, orchestratorName, clusterID str ClusterPassword: a.OrchestratorProfile.OpenShiftConfig.ClusterPassword, EnableAADAuthentication: a.OrchestratorProfile.OpenShiftConfig.EnableAADAuthentication, AzureConfig: AzureConfig{ - TenantID: a.AzProfile.TenantID, - SubscriptionID: a.AzProfile.SubscriptionID, - AADClientID: a.ServicePrincipalProfile.ClientID, - AADClientSecret: a.ServicePrincipalProfile.Secret, - ResourceGroup: a.AzProfile.ResourceGroup, - Location: a.AzProfile.Location, + TenantID: a.AzProfile.TenantID, + SubscriptionID: a.AzProfile.SubscriptionID, + AADClientID: a.ServicePrincipalProfile.ClientID, + AADClientSecret: a.ServicePrincipalProfile.Secret, + ResourceGroup: a.AzProfile.ResourceGroup, + Location: a.AzProfile.Location, + SecurityGroupName: fmt.Sprintf("%s-master-%s-nsg", orchestratorName, clusterID), + PrimaryAvailabilitySetName: fmt.Sprintf("compute-availabilityset-%s", clusterID), }, } diff --git a/pkg/openshift/certgen/unstable/templates/bindata.go b/pkg/openshift/certgen/unstable/templates/bindata.go index ffc7da2d85..e1b0f4a9f7 100644 --- a/pkg/openshift/certgen/unstable/templates/bindata.go +++ b/pkg/openshift/certgen/unstable/templates/bindata.go @@ -298,7 +298,7 @@ func masterTmpBootstrapconfigsMasterConfigYaml() (*asset, error) { return a, nil } -var _nodeEtcOriginCloudproviderAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2a\x49\xcd\x4b\xcc\x2b\xf1\x4c\xb1\x52\xa8\xae\x56\xd0\x73\xac\x2a\x2d\x4a\x75\xce\xcf\x4b\xcb\x4c\xd7\x0b\x81\xc8\xb8\x28\xd4\xd6\x72\x15\x97\x26\x15\x27\x17\x65\x16\x94\x64\xe6\xe7\x61\x53\x1b\x8c\x2c\x0f\xd6\x91\x98\x98\xe2\x9c\x93\x99\x8a\xdd\x68\x47\x47\x17\xa8\x24\xaa\xda\xe0\xd4\xe4\xa2\xd4\x12\x3c\xea\x21\x0a\xa0\x7a\x42\x88\x72\x7a\x51\x6a\x71\x7e\x69\x51\x72\xaa\x7b\x51\x7e\x69\x01\xa6\xd2\x20\x64\x69\x90\xfa\x9c\xfc\xe4\x44\x90\x37\x30\x95\xfa\x40\x65\x40\xaa\x00\x01\x00\x00\xff\xff\x69\xfe\xce\x7d\x37\x01\x00\x00") +var _nodeEtcOriginCloudproviderAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xd0\x51\x0a\x82\x40\x10\x06\xe0\xf7\x4e\xe1\x09\x3c\x40\x6f\x8b\x42\x04\x11\x91\x5e\x60\x5c\xa7\x18\x58\x77\x65\x9c\x0d\x4c\xbc\x7b\xac\xfa\x60\xb9\x45\xcf\xff\xf7\xff\xec\x8e\xa0\x05\x2b\xc7\x7a\x9f\x0c\x43\x92\xaa\xa7\x67\xcc\x9c\xbd\xd1\x3d\x2d\xe7\x24\x4f\xc6\x71\xd7\xf9\xaa\xd3\x4c\xad\x90\xb3\x31\x5b\xac\xf3\xa9\x01\x50\x67\x86\x30\x3e\xad\x54\xbe\x84\xef\xb6\x40\xcd\x28\x3f\xfc\x0c\x96\x4e\xf9\xd7\xd3\x19\x3b\xe7\x59\xe3\x81\x9d\x6f\xb7\xf4\xba\x8e\x83\x37\x4e\x43\xf8\xc6\x96\x9e\x96\x64\x3a\x08\x6a\xcf\x24\xfd\x54\x3b\x43\x83\x91\x9b\x7c\x92\xd0\x6b\x99\x1a\xe0\x5e\x3d\x80\x0c\x54\x64\x48\xfa\x02\x25\x3e\x70\xf9\x6a\xc3\xd2\x2b\x00\x00\xff\xff\x0d\x19\xb3\x4a\xb9\x01\x00\x00") func nodeEtcOriginCloudproviderAzureConfBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf b/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf index 5241e2afa2..fa3d075114 100644 --- a/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf +++ b/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf @@ -5,3 +5,5 @@ aadClientSecret: {{ 
.AzureConfig.AADClientSecret }} aadTenantId: {{ .AzureConfig.TenantID }} resourceGroup: {{ .AzureConfig.ResourceGroup }} location: {{ .AzureConfig.Location }} +securityGroupName: {{ .AzureConfig.SecurityGroupName }} +primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName }} From 9373df08303e2928a71a9c7d36bfd36ee02490b6 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Tue, 26 Jun 2018 12:57:02 -0700 Subject: [PATCH 35/74] Kubernetes: update etcd to 3.2.23 (#3372) --- pkg/acsengine/const.go | 2 +- pkg/api/vlabs/validate.go | 2 +- pkg/api/vlabs/validate_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/acsengine/const.go b/pkg/acsengine/const.go index b72557ec09..e626537a91 100644 --- a/pkg/acsengine/const.go +++ b/pkg/acsengine/const.go @@ -123,7 +123,7 @@ const ( // DefaultOpenshiftOrchestratorName specifies the 3 character orchestrator code of the cluster template and affects resource naming. DefaultOpenshiftOrchestratorName = "ocp" // DefaultEtcdVersion specifies the default etcd version to install - DefaultEtcdVersion = "3.2.16" + DefaultEtcdVersion = "3.2.23" // DefaultEtcdDiskSize specifies the default size for Kubernetes master etcd disk volumes in GB DefaultEtcdDiskSize = "256" // DefaultEtcdDiskSizeGT3Nodes = size for Kubernetes master etcd disk volumes in GB if > 3 nodes diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index b0ad0810ab..59395c5b13 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -28,7 +28,7 @@ var ( "3.0.0", "3.0.1", "3.0.2", "3.0.3", "3.0.4", "3.0.5", "3.0.6", "3.0.7", "3.0.8", "3.0.9", "3.0.10", "3.0.11", "3.0.12", "3.0.13", "3.0.14", "3.0.15", "3.0.16", "3.0.17", "3.1.0", "3.1.1", "3.1.2", "3.1.2", "3.1.3", "3.1.4", "3.1.5", "3.1.6", "3.1.7", "3.1.8", "3.1.9", "3.1.10", "3.2.0", "3.2.1", "3.2.2", "3.2.3", "3.2.4", "3.2.5", "3.2.6", "3.2.7", "3.2.8", "3.2.9", "3.2.11", "3.2.12", - "3.2.13", "3.2.14", "3.2.15", "3.2.16", "3.3.0", "3.3.1"} + "3.2.13", "3.2.14", "3.2.15", "3.2.16", "3.2.23", "3.3.0", "3.3.1"} networkPluginPlusPolicyAllowed = []k8sNetworkConfig{ { networkPlugin: "", diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index 5699c88ae1..d67e3a232c 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -56,7 +56,7 @@ func Test_OrchestratorProfile_Validate(t *testing.T) { }, }, }, - expectedError: "Invalid etcd version \"1.0.0\", please use one of the following versions: [2.2.5 2.3.0 2.3.1 2.3.2 2.3.3 2.3.4 2.3.5 2.3.6 2.3.7 2.3.8 3.0.0 3.0.1 3.0.2 3.0.3 3.0.4 3.0.5 3.0.6 3.0.7 3.0.8 3.0.9 3.0.10 3.0.11 3.0.12 3.0.13 3.0.14 3.0.15 3.0.16 3.0.17 3.1.0 3.1.1 3.1.2 3.1.2 3.1.3 3.1.4 3.1.5 3.1.6 3.1.7 3.1.8 3.1.9 3.1.10 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.2.5 3.2.6 3.2.7 3.2.8 3.2.9 3.2.11 3.2.12 3.2.13 3.2.14 3.2.15 3.2.16 3.3.0 3.3.1]", + expectedError: "Invalid etcd version \"1.0.0\", please use one of the following versions: [2.2.5 2.3.0 2.3.1 2.3.2 2.3.3 2.3.4 2.3.5 2.3.6 2.3.7 2.3.8 3.0.0 3.0.1 3.0.2 3.0.3 3.0.4 3.0.5 3.0.6 3.0.7 3.0.8 3.0.9 3.0.10 3.0.11 3.0.12 3.0.13 3.0.14 3.0.15 3.0.16 3.0.17 3.1.0 3.1.1 3.1.2 3.1.2 3.1.3 3.1.4 3.1.5 3.1.6 3.1.7 3.1.8 3.1.9 3.1.10 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.2.5 3.2.6 3.2.7 3.2.8 3.2.9 3.2.11 3.2.12 3.2.13 3.2.14 3.2.15 3.2.16 3.2.23 3.3.0 3.3.1]", }, "should error when KubernetesConfig has enableAggregatedAPIs enabled with an invalid version": { properties: &Properties{ From c357400d5ee1cb6d25e3ec1c895043e32ca504bd Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Tue, 26 
Jun 2018 15:36:32 -0700 Subject: [PATCH 36/74] vmss Microsoft.AKS billing extension (#3360) --- parts/k8s/kubernetesagentresourcesvmss.t | 11 +++++++++++ parts/k8s/kuberneteswinagentresourcesvmss.t | 11 +++++++++++ 2 files changed, 22 insertions(+) diff --git a/parts/k8s/kubernetesagentresourcesvmss.t b/parts/k8s/kubernetesagentresourcesvmss.t index 0977fd1255..d9264d2437 100644 --- a/parts/k8s/kubernetesagentresourcesvmss.t +++ b/parts/k8s/kubernetesagentresourcesvmss.t @@ -145,6 +145,17 @@ "commandToExecute": "[concat(variables('provisionScriptParametersCommon'),' /usr/bin/nohup /bin/bash -c \"/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1\"')]" } } + }, + { + "name": "[concat(variables('{{.Name}}VMNamePrefix'), '-computeAksLinuxBilling')]", + "location": "[variables('location')]", + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Linux.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": {} + } } {{if UseManagedIdentity}} ,{ diff --git a/parts/k8s/kuberneteswinagentresourcesvmss.t b/parts/k8s/kuberneteswinagentresourcesvmss.t index 80ff7077dd..6be6d9e0c1 100644 --- a/parts/k8s/kuberneteswinagentresourcesvmss.t +++ b/parts/k8s/kuberneteswinagentresourcesvmss.t @@ -113,6 +113,17 @@ "commandToExecute": "[concat('powershell.exe -ExecutionPolicy Unrestricted -command \"', '$arguments = ', variables('singleQuote'),'-MasterIP ',variables('kubernetesAPIServerIP'),' -KubeDnsServiceIp ',variables('kubeDnsServiceIp'),' -MasterFQDNPrefix ',variables('masterFqdnPrefix'),' -Location ',variables('location'),' -AgentKey ',variables('clientPrivateKey'),' -AADClientId ',variables('servicePrincipalClientId'),' -AADClientSecret ',variables('servicePrincipalClientSecret'),variables('singleQuote'), ' ; ', variables('windowsCustomScriptSuffix'), '\" > %SYSTEMDRIVE%\\AzureData\\CustomDataSetupScript.log 2>&1')]" } } + }, + { + "name": "[concat(variables('{{.Name}}VMNamePrefix'), '-computeAksLinuxBilling')]", + "location": "[variables('location')]", + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Windows.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": {} + } } {{if UseManagedIdentity}} ,{ From 91cdfe965bda92cbc4610cfad91fe455d3445252 Mon Sep 17 00:00:00 2001 From: Adelina Tuvenie Date: Wed, 27 Jun 2018 01:37:16 +0300 Subject: [PATCH 37/74] Adds the missing logic for handling the preProvision extensions on Win agents. 
(#3367) --- parts/k8s/kuberneteswindowssetup.ps1 | 3 +++ pkg/acsengine/template_generator.go | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/parts/k8s/kuberneteswindowssetup.ps1 b/parts/k8s/kuberneteswindowssetup.ps1 index 0c4b30971c..9bfa37daea 100644 --- a/parts/k8s/kuberneteswindowssetup.ps1 +++ b/parts/k8s/kuberneteswindowssetup.ps1 @@ -642,6 +642,9 @@ try Write-Log "Set Internet Explorer" Set-Explorer + Write-Log "Start preProvisioning script" + PREPROVISION_EXTENSION + Write-Log "Setup Complete, reboot computer" Restart-Computer } diff --git a/pkg/acsengine/template_generator.go b/pkg/acsengine/template_generator.go index 788d6866da..9266e2c332 100644 --- a/pkg/acsengine/template_generator.go +++ b/pkg/acsengine/template_generator.go @@ -598,6 +598,11 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat if e != nil { return "" } + preprovisionCmd := "" + if profile.PreprovisionExtension != nil { + preprovisionCmd = makeAgentExtensionScriptCommands(cs, profile) + } + str = strings.Replace(str, "PREPROVISION_EXTENSION", escapeSingleLine(strings.TrimSpace(preprovisionCmd)), -1) return fmt.Sprintf("\"customData\": \"[base64(concat('%s'))]\",", str) }, "GetMasterSwarmModeCustomData": func() string { From 9e49acf7297a8b09e62f81fc54027fd72ab7bddf Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Tue, 26 Jun 2018 16:28:09 -0700 Subject: [PATCH 38/74] enable v1.11.0-rc.3 (#3376) --- pkg/api/common/versions.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/api/common/versions.go b/pkg/api/common/versions.go index ad6ceb4c22..8678d13bdd 100644 --- a/pkg/api/common/versions.go +++ b/pkg/api/common/versions.go @@ -65,6 +65,7 @@ var AllKubernetesSupportedVersions = map[string]bool{ "1.11.0-beta.2": true, "1.11.0-rc.1": true, "1.11.0-rc.2": true, + "1.11.0-rc.3": true, } // GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release From 513eed38b2384cb1163697f0d4bbf03ee1213a13 Mon Sep 17 00:00:00 2001 From: Michalis Kargakis Date: Wed, 27 Jun 2018 18:29:22 +0200 Subject: [PATCH 39/74] Fetch openshift test image from new location (#3354) --- .circleci/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 29a945e62d..54c0636a51 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -335,7 +335,7 @@ jobs: openshift-3.9-rhel-e2e: working_directory: /go/src/github.com/Azure/acs-engine docker: - - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9 + - image: registry.svc.ci.openshift.org/azure/acs-engine-tests:v3.9 environment: GOPATH: /go steps: @@ -367,7 +367,7 @@ jobs: openshift-3.9-rhel-e2e-vnet: working_directory: /go/src/github.com/Azure/acs-engine docker: - - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9 + - image: registry.svc.ci.openshift.org/azure/acs-engine-tests:v3.9 environment: GOPATH: /go steps: @@ -399,7 +399,7 @@ jobs: openshift-3.9-centos-e2e: working_directory: /go/src/github.com/Azure/acs-engine docker: - - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9 + - image: registry.svc.ci.openshift.org/azure/acs-engine-tests:v3.9 environment: GOPATH: /go steps: From 09d6a8d8cf6cf63f617b18275a51d585425e0d51 Mon Sep 17 00:00:00 2001 From: Lachlan Evenson Date: Wed, 27 Jun 2018 13:44:46 -0600 Subject: [PATCH 40/74] add GPU ExtendedResourceToleration admission controller support (#3181) --- ...addons-nvidia-device-plugin-daemonset.yaml | 30 
+++++++++--- parts/k8s/kubernetesagentcustomdata.yml | 4 +- parts/k8s/kubernetesbase.t | 7 ++- pkg/acsengine/const.go | 4 +- pkg/acsengine/defaults-apiserver.go | 2 +- pkg/acsengine/defaults-apiserver_test.go | 2 +- pkg/acsengine/defaults.go | 8 ++-- pkg/acsengine/k8s_versions.go | 6 ++- pkg/acsengine/k8s_versions_test.go | 48 ++++++++++++++++++- pkg/acsengine/params_k8s.go | 6 +-- pkg/acsengine/template_generator.go | 10 ++-- pkg/api/const.go | 4 +- pkg/api/types.go | 2 +- pkg/api/types_test.go | 2 +- pkg/api/vlabs/validate.go | 38 ++++++++------- 15 files changed, 124 insertions(+), 49 deletions(-) diff --git a/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml index 6ed2906463..63fa6ac52c 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml @@ -1,26 +1,42 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: labels: + k8s-app: nvidia-device-plugin kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile name: nvidia-device-plugin namespace: kube-system spec: + selector: + matchLabels: + k8s-app: nvidia-device-plugin + updateStrategy: + type: RollingUpdate template: metadata: - # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler - # reserves resources for critical add-on pods so that they can be rescheduled after - # a failure. This annotation works in tandem with the toleration below. annotations: scheduler.alpha.kubernetes.io/critical-pod: "" labels: - name: nvidia-device-plugin-ds + k8s-app: nvidia-device-plugin spec: + priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: accelerator + operator: In + values: + - nvidia tolerations: - # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. - # This, along with the annotation above marks this pod as a critical add-on. 
- key: CriticalAddonsOnly operator: Exists + - key: nvidia.com/gpu + effect: NoSchedule + operator: Equal + value: "true" containers: - image: name: nvidia-device-plugin-ctr diff --git a/parts/k8s/kubernetesagentcustomdata.yml b/parts/k8s/kubernetesagentcustomdata.yml index 7466b748fb..28f69f2084 100644 --- a/parts/k8s/kubernetesagentcustomdata.yml +++ b/parts/k8s/kubernetesagentcustomdata.yml @@ -35,14 +35,14 @@ write_files: "log-opts": { "max-size": "50m", "max-file": "5" - }{{if IsNVIDIADevicePluginEnabled}} + }{{if IsNSeriesSKU .}}{{if IsNVIDIADevicePluginEnabled}} ,"default-runtime": "nvidia", "runtimes": { "nvidia": { "path": "/usr/bin/nvidia-container-runtime", "runtimeArgs": [] } - }{{end}} + }{{end}}{{end}} } {{end}} diff --git a/parts/k8s/kubernetesbase.t b/parts/k8s/kubernetesbase.t index d9efe71ab2..dd00475992 100644 --- a/parts/k8s/kubernetesbase.t +++ b/parts/k8s/kubernetesbase.t @@ -41,6 +41,11 @@ {{range $index, $agent := .AgentPoolProfiles}} "{{.Name}}Index": {{$index}}, {{template "k8s/kubernetesagentvars.t" .}} + {{if IsNSeriesSKU .}} + {{if IsNVIDIADevicePluginEnabled}} + "registerWithGpuTaints": "nvidia.com/gpu=true:NoSchedule", + {{end}} + {{end}} {{if .IsStorageAccount}} {{if .HasDisks}} "{{.Name}}DataAccountName": "[concat(variables('storageAccountBaseName'), 'data{{$index}}')]", @@ -187,4 +192,4 @@ {{end}} } -} \ No newline at end of file +} diff --git a/pkg/acsengine/const.go b/pkg/acsengine/const.go index e626537a91..40ebf79e8f 100644 --- a/pkg/acsengine/const.go +++ b/pkg/acsengine/const.go @@ -136,8 +136,8 @@ const ( DefaultReschedulerAddonName = "rescheduler" // DefaultMetricsServerAddonName is the name of the kubernetes Metrics server addon deployment DefaultMetricsServerAddonName = "metrics-server" - // DefaultNVIDIADevicePluginAddonName is the name of the kubernetes NVIDIA Device Plugin daemon set - DefaultNVIDIADevicePluginAddonName = "nvidia-device-plugin" + // NVIDIADevicePluginAddonName is the name of the kubernetes NVIDIA Device Plugin daemon set + NVIDIADevicePluginAddonName = "nvidia-device-plugin" // ContainerMonitoringAddonName is the name of the kubernetes Container Monitoring addon deployment ContainerMonitoringAddonName = "container-monitoring" // AzureCNINetworkMonitoringAddonName is the name of the Azure CNI networkmonitor addon diff --git a/pkg/acsengine/defaults-apiserver.go b/pkg/acsengine/defaults-apiserver.go index 0a1b2bf862..34f278f1ee 100644 --- a/pkg/acsengine/defaults-apiserver.go +++ b/pkg/acsengine/defaults-apiserver.go @@ -143,7 +143,7 @@ func getDefaultAdmissionControls(cs *api.ContainerService) (string, string) { // Add new version case when applying admission controllers only available in that version or later switch { case common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.9.0"): - admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" + admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages,ExtendedResourceToleration" default: admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" } diff --git a/pkg/acsengine/defaults-apiserver_test.go b/pkg/acsengine/defaults-apiserver_test.go index ee1520b331..7ea3d972d8 100644 
--- a/pkg/acsengine/defaults-apiserver_test.go +++ b/pkg/acsengine/defaults-apiserver_test.go @@ -322,7 +322,7 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { admissonControlKey := "--admission-control" cs := createContainerService("testcluster", version, 3, 2) cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig = map[string]string{} - cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig[admissonControlKey] = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" + cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig[admissonControlKey] = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages,ExtendedResourceToleration" setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index 2edb9318b3..29f0730d4b 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -307,10 +307,10 @@ var ( // DefaultNVIDIADevicePluginAddonsConfig is the default NVIDIA Device Plugin Kubernetes addon Config DefaultNVIDIADevicePluginAddonsConfig = api.KubernetesAddon{ - Name: DefaultNVIDIADevicePluginAddonName, + Name: NVIDIADevicePluginAddonName, Containers: []api.KubernetesContainerSpec{ { - Name: DefaultNVIDIADevicePluginAddonName, + Name: NVIDIADevicePluginAddonName, }, }, } @@ -453,7 +453,7 @@ func setOrchestratorDefaults(cs *api.ContainerService) { m = getAddonsIndexByName(o.KubernetesConfig.Addons, DefaultMetricsServerAddonName) o.KubernetesConfig.Addons[m].Enabled = k8sVersionMetricsServerAddonEnabled(o) } - n := getAddonsIndexByName(o.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) + n := getAddonsIndexByName(o.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) if n < 0 { // Provide default acs-engine config for NVIDIA Device Plugin o.KubernetesConfig.Addons = append(o.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonsConfig) @@ -563,7 +563,7 @@ func setOrchestratorDefaults(cs *api.ContainerService) { if a.OrchestratorProfile.KubernetesConfig.Addons[m].IsEnabled(api.DefaultMetricsServerAddonEnabled) { a.OrchestratorProfile.KubernetesConfig.Addons[m] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[m], DefaultMetricsServerAddonsConfig) } - n := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) + n := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) if a.OrchestratorProfile.KubernetesConfig.Addons[n].IsEnabled(api.DefaultNVIDIADevicePluginAddonEnabled) { a.OrchestratorProfile.KubernetesConfig.Addons[n] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[n], DefaultNVIDIADevicePluginAddonsConfig) } diff --git a/pkg/acsengine/k8s_versions.go b/pkg/acsengine/k8s_versions.go index f889136508..482c06fbb9 100644 --- a/pkg/acsengine/k8s_versions.go +++ b/pkg/acsengine/k8s_versions.go @@ -25,6 +25,7 @@ var k8sComponentVersions = map[string]map[string]string{ ContainerMonitoringAddonName: "oms:ciprod05082018", AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4", "cluster-autoscaler": "cluster-autoscaler:v1.3.0", + NVIDIADevicePluginAddonName: "k8s-device-plugin:1.10", "nodestatusfreq": 
DefaultKubernetesNodeStatusUpdateFrequency, "nodegraceperiod": DefaultKubernetesCtrlMgrNodeMonitorGracePeriod, "podeviction": DefaultKubernetesCtrlMgrPodEvictionTimeout, @@ -55,7 +56,7 @@ var k8sComponentVersions = map[string]map[string]string{ ContainerMonitoringAddonName: "oms:ciprod05082018", AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4", "cluster-autoscaler": "cluster-autoscaler:v1.2.2", - "nvidia-device-plugin": "k8s-device-plugin:1.10", + NVIDIADevicePluginAddonName: "k8s-device-plugin:1.10", "nodestatusfreq": DefaultKubernetesNodeStatusUpdateFrequency, "nodegraceperiod": DefaultKubernetesCtrlMgrNodeMonitorGracePeriod, "podeviction": DefaultKubernetesCtrlMgrPodEvictionTimeout, @@ -277,6 +278,7 @@ func getK8sVersionComponents(version string, overrides map[string]string) map[st "ratelimitbucket": k8sComponentVersions["1.11"]["ratelimitbucket"], "gchighthreshold": k8sComponentVersions["1.11"]["gchighthreshold"], "gclowthreshold": k8sComponentVersions["1.11"]["gclowthreshold"], + NVIDIADevicePluginAddonName: k8sComponentVersions["1.11"][NVIDIADevicePluginAddonName], } case "1.10": ret = map[string]string{ @@ -311,7 +313,7 @@ func getK8sVersionComponents(version string, overrides map[string]string) map[st "gchighthreshold": k8sComponentVersions["1.10"]["gchighthreshold"], "gclowthreshold": k8sComponentVersions["1.10"]["gclowthreshold"], DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.10"]["cluster-autoscaler"], - DefaultNVIDIADevicePluginAddonName: k8sComponentVersions["1.10"]["nvidia-device-plugin"], + NVIDIADevicePluginAddonName: k8sComponentVersions["1.10"][NVIDIADevicePluginAddonName], } case "1.9": ret = map[string]string{ diff --git a/pkg/acsengine/k8s_versions_test.go b/pkg/acsengine/k8s_versions_test.go index c4821ee55d..a63c2a4dd3 100644 --- a/pkg/acsengine/k8s_versions_test.go +++ b/pkg/acsengine/k8s_versions_test.go @@ -30,6 +30,7 @@ func TestGetK8sVersionComponents(t *testing.T) { ContainerMonitoringAddonName: k8sComponentVersions["1.11"][ContainerMonitoringAddonName], AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.11"][AzureCNINetworkMonitoringAddonName], DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.11"]["cluster-autoscaler"], + NVIDIADevicePluginAddonName: k8sComponentVersions["1.11"][NVIDIADevicePluginAddonName], "nodestatusfreq": k8sComponentVersions["1.11"]["nodestatusfreq"], "nodegraceperiod": k8sComponentVersions["1.11"]["nodegraceperiod"], "podeviction": k8sComponentVersions["1.11"]["podeviction"], @@ -50,6 +51,51 @@ func TestGetK8sVersionComponents(t *testing.T) { } } + oneDotTenDotZero := getK8sVersionComponents("1.10.0", nil) + if oneDotTenDotZero == nil { + t.Fatalf("getK8sVersionComponents() should not return nil for valid version") + } + expected = map[string]string{ + "hyperkube": "hyperkube-amd64:v1.10.0", + "ccm": "cloud-controller-manager-amd64:v1.10.0", + "windowszip": "v1.10.0-1int.zip", + "dockerEngineVersion": k8sComponentVersions["1.10"]["dockerEngine"], + DefaultDashboardAddonName: k8sComponentVersions["1.10"]["dashboard"], + "exechealthz": k8sComponentVersions["1.10"]["exechealthz"], + "addonresizer": k8sComponentVersions["1.10"]["addon-resizer"], + "heapster": k8sComponentVersions["1.10"]["heapster"], + DefaultMetricsServerAddonName: k8sComponentVersions["1.10"]["metrics-server"], + "dns": k8sComponentVersions["1.10"]["kube-dns"], + "addonmanager": k8sComponentVersions["1.10"]["addon-manager"], + "dnsmasq": k8sComponentVersions["1.10"]["dnsmasq"], + "pause": k8sComponentVersions["1.10"]["pause"], + 
DefaultTillerAddonName: k8sComponentVersions["1.10"]["tiller"], + DefaultReschedulerAddonName: k8sComponentVersions["1.10"]["rescheduler"], + DefaultACIConnectorAddonName: k8sComponentVersions["1.10"]["aci-connector"], + ContainerMonitoringAddonName: k8sComponentVersions["1.10"][ContainerMonitoringAddonName], + AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.10"][AzureCNINetworkMonitoringAddonName], + DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.10"]["cluster-autoscaler"], + NVIDIADevicePluginAddonName: k8sComponentVersions["1.10"][NVIDIADevicePluginAddonName], + "nodestatusfreq": k8sComponentVersions["1.10"]["nodestatusfreq"], + "nodegraceperiod": k8sComponentVersions["1.10"]["nodegraceperiod"], + "podeviction": k8sComponentVersions["1.10"]["podeviction"], + "routeperiod": k8sComponentVersions["1.10"]["routeperiod"], + "backoffretries": k8sComponentVersions["1.10"]["backoffretries"], + "backoffjitter": k8sComponentVersions["1.10"]["backoffjitter"], + "backoffduration": k8sComponentVersions["1.10"]["backoffduration"], + "backoffexponent": k8sComponentVersions["1.10"]["backoffexponent"], + "ratelimitqps": k8sComponentVersions["1.10"]["ratelimitqps"], + "ratelimitbucket": k8sComponentVersions["1.10"]["ratelimitbucket"], + "gchighthreshold": k8sComponentVersions["1.10"]["gchighthreshold"], + "gclowthreshold": k8sComponentVersions["1.10"]["gclowthreshold"], + } + + for k, v := range oneDotTenDotZero { + if expected[k] != v { + t.Fatalf("getK8sVersionComponents() returned an unexpected map[string]string value for k8s 1.10.0: %s = %s", k, oneDotTenDotZero[k]) + } + } + oneDotNineDotThree := getK8sVersionComponents("1.9.3", nil) if oneDotNineDotThree == nil { t.Fatalf("getK8sVersionComponents() should not return nil for valid version") @@ -71,7 +117,7 @@ func TestGetK8sVersionComponents(t *testing.T) { DefaultTillerAddonName: k8sComponentVersions["1.9"]["tiller"], DefaultReschedulerAddonName: k8sComponentVersions["1.9"]["rescheduler"], DefaultACIConnectorAddonName: k8sComponentVersions["1.9"]["aci-connector"], - ContainerMonitoringAddonName: k8sComponentVersions["1.11"][ContainerMonitoringAddonName], + ContainerMonitoringAddonName: k8sComponentVersions["1.9"][ContainerMonitoringAddonName], AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.9"][AzureCNINetworkMonitoringAddonName], DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.9"]["cluster-autoscaler"], "nodestatusfreq": k8sComponentVersions["1.9"]["nodestatusfreq"], diff --git a/pkg/acsengine/params_k8s.go b/pkg/acsengine/params_k8s.go index 1ca849050d..2dfdae469f 100644 --- a/pkg/acsengine/params_k8s.go +++ b/pkg/acsengine/params_k8s.go @@ -124,13 +124,13 @@ func assignKubernetesParameters(properties *api.Properties, parametersMap params addValue(parametersMap, "kubernetesMetricsServerSpec", cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase+KubeConfigs[k8sVersion][DefaultMetricsServerAddonName]) } } - nvidiaDevicePluginAddon := getAddonByName(properties.OrchestratorProfile.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) - c = getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, DefaultNVIDIADevicePluginAddonName) + nvidiaDevicePluginAddon := getAddonByName(properties.OrchestratorProfile.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) + c = getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, NVIDIADevicePluginAddonName) if c > -1 { if nvidiaDevicePluginAddon.Containers[c].Image != "" { addValue(parametersMap, "kubernetesNVIDIADevicePluginSpec", 
nvidiaDevicePluginAddon.Containers[c].Image) } else { - addValue(parametersMap, "kubernetesNVIDIADevicePluginSpec", cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase+KubeConfigs[k8sVersion][DefaultNVIDIADevicePluginAddonName]) + addValue(parametersMap, "kubernetesNVIDIADevicePluginSpec", cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase+KubeConfigs[k8sVersion][NVIDIADevicePluginAddonName]) } } containerMonitoringAddon := getAddonByName(properties.OrchestratorProfile.KubernetesConfig.Addons, ContainerMonitoringAddonName) diff --git a/pkg/acsengine/template_generator.go b/pkg/acsengine/template_generator.go index 9266e2c332..aefc02f023 100644 --- a/pkg/acsengine/template_generator.go +++ b/pkg/acsengine/template_generator.go @@ -188,6 +188,10 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat storagetier, _ := getStorageAccountType(profile.VMSize) buf.WriteString(fmt.Sprintf(",storageprofile=managed,storagetier=%s", storagetier)) } + if isNSeriesSKU(profile) { + accelerator := "nvidia" + buf.WriteString(fmt.Sprintf(",accelerator=%s", accelerator)) + } buf.WriteString(fmt.Sprintf(",kubernetes.azure.com/cluster=%s", rg)) for k, v := range profile.CustomNodeLabels { buf.WriteString(fmt.Sprintf(",%s=%s", k, v)) @@ -752,8 +756,8 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat rC := getAddonContainersIndexByName(reschedulerAddon.Containers, DefaultReschedulerAddonName) metricsServerAddon := getAddonByName(cs.Properties.OrchestratorProfile.KubernetesConfig.Addons, DefaultMetricsServerAddonName) mC := getAddonContainersIndexByName(metricsServerAddon.Containers, DefaultMetricsServerAddonName) - nvidiaDevicePluginAddon := getAddonByName(cs.Properties.OrchestratorProfile.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) - nC := getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, DefaultNVIDIADevicePluginAddonName) + nvidiaDevicePluginAddon := getAddonByName(cs.Properties.OrchestratorProfile.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) + nC := getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, NVIDIADevicePluginAddonName) switch attr { case "kubernetesHyperkubeSpec": val = cs.Properties.OrchestratorProfile.KubernetesConfig.KubernetesImageBase + KubeConfigs[k8sVersion]["hyperkube"] @@ -959,7 +963,7 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat val = nvidiaDevicePluginAddon.Containers[nC].Image } } else { - val = cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase + KubeConfigs[k8sVersion][DefaultNVIDIADevicePluginAddonName] + val = cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase + KubeConfigs[k8sVersion][NVIDIADevicePluginAddonName] } case "kubernetesReschedulerSpec": if rC > -1 { diff --git a/pkg/api/const.go b/pkg/api/const.go index ca5b112475..2cba4f69a6 100644 --- a/pkg/api/const.go +++ b/pkg/api/const.go @@ -119,8 +119,8 @@ const ( DefaultReschedulerAddonName = "rescheduler" // DefaultMetricsServerAddonName is the name of the kubernetes metrics server addon deployment DefaultMetricsServerAddonName = "metrics-server" - // DefaultNVIDIADevicePluginAddonName is the name of the NVIDIA device plugin addon deployment - DefaultNVIDIADevicePluginAddonName = "nvidia-device-plugin" + // NVIDIADevicePluginAddonName is the name of the NVIDIA device plugin addon deployment + NVIDIADevicePluginAddonName = "nvidia-device-plugin" // ContainerMonitoringAddonName is the name of the kubernetes Container Monitoring addon deployment 
ContainerMonitoringAddonName = "container-monitoring" // DefaultPrivateClusterEnabled determines the acs-engine provided default for enabling kubernetes Private Cluster diff --git a/pkg/api/types.go b/pkg/api/types.go index d486f04022..f443dc2a9b 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -943,7 +943,7 @@ func (p *Properties) IsNVIDIADevicePluginEnabled() bool { k := p.OrchestratorProfile.KubernetesConfig o := p.OrchestratorProfile for i := range k.Addons { - if k.Addons[i].Name == DefaultNVIDIADevicePluginAddonName { + if k.Addons[i].Name == NVIDIADevicePluginAddonName { nvidiaDevicePluginAddon = k.Addons[i] } } diff --git a/pkg/api/types_test.go b/pkg/api/types_test.go index 6a6d346587..5626976f0f 100644 --- a/pkg/api/types_test.go +++ b/pkg/api/types_test.go @@ -898,7 +898,7 @@ func TestIsNVIDIADevicePluginEnabled(t *testing.T) { p.AgentPoolProfiles[0].VMSize = "Standard_D2_v2" p.OrchestratorProfile.KubernetesConfig.Addons = []KubernetesAddon{ { - Name: DefaultNVIDIADevicePluginAddonName, + Name: NVIDIADevicePluginAddonName, Enabled: helpers.PointerToBool(false), }, } diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 59395c5b13..baeca617fd 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -427,24 +427,26 @@ func (a *Properties) validateAddons() error { return fmt.Errorf("Cluster Autoscaler add-on can only be used with VirtualMachineScaleSets. Please specify \"availabilityProfile\": \"%s\"", VirtualMachineScaleSets) } case "nvidia-device-plugin": - version := common.RationalizeReleaseAndVersion( - a.OrchestratorProfile.OrchestratorType, - a.OrchestratorProfile.OrchestratorRelease, - a.OrchestratorProfile.OrchestratorVersion, - false) - if version == "" { - return fmt.Errorf("the following user supplied OrchestratorProfile configuration is not supported: OrchestratorType: %s, OrchestratorRelease: %s, OrchestratorVersion: %s. Please check supported Release or Version for this build of acs-engine", a.OrchestratorProfile.OrchestratorType, a.OrchestratorProfile.OrchestratorRelease, a.OrchestratorProfile.OrchestratorVersion) - } - sv, err := semver.Make(version) - if err != nil { - return fmt.Errorf("could not validate version %s", version) - } - minVersion, err := semver.Make("1.10.0") - if err != nil { - return fmt.Errorf("could not validate version") - } - if isNSeriesSKU && sv.LT(minVersion) { - return fmt.Errorf("NVIDIA Device Plugin add-on can only be used Kubernetes 1.10 or above. Please specify \"orchestratorRelease\": \"1.10\"") + if helpers.IsTrueBoolPointer(addon.Enabled) { + version := common.RationalizeReleaseAndVersion( + a.OrchestratorProfile.OrchestratorType, + a.OrchestratorProfile.OrchestratorRelease, + a.OrchestratorProfile.OrchestratorVersion, + false) + if version == "" { + return fmt.Errorf("the following user supplied OrchestratorProfile configuration is not supported: OrchestratorType: %s, OrchestratorRelease: %s, OrchestratorVersion: %s. Please check supported Release or Version for this build of acs-engine", a.OrchestratorProfile.OrchestratorType, a.OrchestratorProfile.OrchestratorRelease, a.OrchestratorProfile.OrchestratorVersion) + } + sv, err := semver.Make(version) + if err != nil { + return fmt.Errorf("could not validate version %s", version) + } + minVersion, err := semver.Make("1.10.0") + if err != nil { + return fmt.Errorf("could not validate version") + } + if isNSeriesSKU && sv.LT(minVersion) { + return fmt.Errorf("NVIDIA Device Plugin add-on can only be used Kubernetes 1.10 or above. 
Please specify \"orchestratorRelease\": \"1.10\"") + } } } } From 1830fb2c28f4949fa4752cf4dd122a66d4583495 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Wed, 27 Jun 2018 16:30:20 -0700 Subject: [PATCH 41/74] added support for k8s v1.11.0 (#3379) --- pkg/api/common/versions.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/api/common/versions.go b/pkg/api/common/versions.go index 8678d13bdd..5366f27dbf 100644 --- a/pkg/api/common/versions.go +++ b/pkg/api/common/versions.go @@ -66,6 +66,7 @@ var AllKubernetesSupportedVersions = map[string]bool{ "1.11.0-rc.1": true, "1.11.0-rc.2": true, "1.11.0-rc.3": true, + "1.11.0": true, } // GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release From 8f401a15952937e7243ae30982b6e74cc49809da Mon Sep 17 00:00:00 2001 From: Yongli Chen Date: Thu, 28 Jun 2018 14:33:49 -0700 Subject: [PATCH 42/74] Add Azure-npm to provide native k8s network policy support (#3205) --- docs/clusterdefinition.md | 3 +- ...netesmasteraddons-azure-npm-daemonset.yaml | 101 ++++++++++++++++++ pkg/acsengine/addons.go | 5 + pkg/acsengine/const.go | 6 +- pkg/acsengine/defaults.go | 32 +++++- pkg/api/const.go | 4 +- pkg/api/convertertoapi.go | 3 +- pkg/api/vlabs/validate.go | 13 ++- pkg/api/vlabs/validate_test.go | 27 +++-- test/e2e/kubernetes/kubernetes_test.go | 2 +- 10 files changed, 177 insertions(+), 19 deletions(-) create mode 100644 parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 38ef437599..0098e0d460 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -54,7 +54,8 @@ Here are the valid values for the orchestrator types: | kubeletConfig | no | Configure various runtime configuration for kubelet. See `kubeletConfig` [below](#feat-kubelet-config) | | kubernetesImageBase | no | Specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s-gcrio.azureedge.net/` | | networkPlugin | no | Specifies the network plugin implementation for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience
`"kubenet"` for k8s software networking implementation.
`"flannel"` for using CoreOS Flannel
`"cilium"` for using the default Cilium CNI IPAM | -| networkPolicy | no | Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`calico` for Calico network policy.
`cilium` for cilium network policy (Lin).
See [network policy examples](../examples/networkpolicy) for more information | + +|networkPolicy|no|Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`"calico"` for Calico network policy.
`"cilium"` for cilium network policy (Lin), and `"azure"` (experimental) for Azure CNI-compliant network policy (note: Azure CNI-compliant network policy requires explicit `"networkPlugin": "azure"` configuration as well).
See [network policy examples](../examples/networkpolicy) for more information. | | privateCluster | no | Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster). | | schedulerConfig | no | Configure various runtime configuration for scheduler. See `schedulerConfig` [below](#feat-scheduler-config) | | serviceCidr | no | IP range for Service IPs, Default is "10.0.0.0/16". This range is never routed outside of a node so does not need to lie within clusterSubnet or the VNET | diff --git a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml new file mode 100644 index 0000000000..67110032ec --- /dev/null +++ b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml @@ -0,0 +1,101 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: "EnsureExists" +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: "EnsureExists" +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: "EnsureExists" +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: "EnsureExists" +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: containernetworking/azure-npm:v0.0.2 + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + - name: log + mountPath: /var/log + priorityClassName: azure-npm-priority-class + hostNetwork: true + volumes: + - name: log + hostPath: + path: /var/log + type: Directory + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + serviceAccountName: azure-npm diff --git a/pkg/acsengine/addons.go b/pkg/acsengine/addons.go index 7c707843e1..811fc9c13b 100644 --- a/pkg/acsengine/addons.go +++ b/pkg/acsengine/addons.go @@ -72,6 +72,11 @@ func kubernetesAddonSettingsInit(profile *api.Properties) []kubernetesFeatureSet "kube-rescheduler-deployment.yaml", profile.OrchestratorProfile.KubernetesConfig.IsReschedulerEnabled(), }, + { + "kubernetesmasteraddons-azure-npm-daemonset.yaml", + "azure-npm-daemonset.yaml", + profile.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure && profile.OrchestratorProfile.KubernetesConfig.NetworkPlugin == NetworkPluginAzure, + }, { "kubernetesmasteraddons-calico-daemonset.yaml", "calico-daemonset.yaml", diff --git a/pkg/acsengine/const.go b/pkg/acsengine/const.go index 40ebf79e8f..afaa920f6d 100644 --- 
a/pkg/acsengine/const.go +++ b/pkg/acsengine/const.go @@ -50,7 +50,9 @@ const ( NetworkPolicyCalico = "calico" // NetworkPolicyCilium is the string expression for cilium network policy config option NetworkPolicyCilium = "cilium" - // NetworkPluginAzure is the string expression for Azure CNI network policy + // NetworkPolicyAzure is the string expression for Azure CNI network policy manager + NetworkPolicyAzure = "azure" + // NetworkPluginAzure is the string expression for Azure CNI plugin NetworkPluginAzure = "azure" // NetworkPluginKubenet is the string expression for kubenet network plugin NetworkPluginKubenet = "kubenet" @@ -142,6 +144,8 @@ const ( ContainerMonitoringAddonName = "container-monitoring" // AzureCNINetworkMonitoringAddonName is the name of the Azure CNI networkmonitor addon AzureCNINetworkMonitoringAddonName = "azure-cni-networkmonitor" + // AzureNetworkPolicyAddonName is the name of the Azure CNI networkmonitor addon + AzureNetworkPolicyAddonName = "azure-npm-daemonset" // DefaultKubernetesKubeletMaxPods is the max pods per kubelet DefaultKubernetesKubeletMaxPods = 110 // DefaultMasterEtcdServerPort is the default etcd server port for Kubernetes master nodes diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index 29f0730d4b..af5795c4de 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -344,6 +344,16 @@ var ( }, }, } + + // DefaultAzureNetworkPolicyAddonsConfig is the default Azure NetworkPolicy addon config + DefaultAzureNetworkPolicyAddonsConfig = api.KubernetesAddon{ + Name: AzureNetworkPolicyAddonName, + Containers: []api.KubernetesContainerSpec{ + { + Name: AzureNetworkPolicyAddonName, + }, + }, + } ) // setPropertiesDefaults for the container Properties, returns true if certs are generated @@ -394,8 +404,10 @@ func setOrchestratorDefaults(cs *api.ContainerService) { // and set a default network policy enforcement configuration switch o.KubernetesConfig.NetworkPolicy { case NetworkPluginAzure: - o.KubernetesConfig.NetworkPlugin = NetworkPluginAzure - o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy + if o.KubernetesConfig.NetworkPlugin == "" { + o.KubernetesConfig.NetworkPlugin = NetworkPluginAzure + o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy + } case NetworkPolicyNone: o.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy @@ -417,6 +429,7 @@ func setOrchestratorDefaults(cs *api.ContainerService) { DefaultNVIDIADevicePluginAddonsConfig, DefaultContainerMonitoringAddonsConfig, DefaultAzureCNINetworkMonitorAddonsConfig, + DefaultAzureNetworkPolicyAddonsConfig, } enforceK8sAddonOverrides(o.KubernetesConfig.Addons, o) } else { @@ -468,6 +481,11 @@ func setOrchestratorDefaults(cs *api.ContainerService) { // Provide default acs-engine config for Azure CNI containernetworking Device Plugin o.KubernetesConfig.Addons = append(o.KubernetesConfig.Addons, DefaultAzureCNINetworkMonitorAddonsConfig) } + aNP := getAddonsIndexByName(o.KubernetesConfig.Addons, AzureNetworkPolicyAddonName) + if aNP < 0 { + // Provide default acs-engine config for Azure NetworkPolicy addon + o.KubernetesConfig.Addons = append(o.KubernetesConfig.Addons, DefaultAzureNetworkPolicyAddonsConfig) + } } if o.KubernetesConfig.KubernetesImageBase == "" { o.KubernetesConfig.KubernetesImageBase = cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase @@ -575,6 +593,10 @@ func setOrchestratorDefaults(cs *api.ContainerService) { if 
a.OrchestratorProfile.KubernetesConfig.Addons[aN].IsEnabled(a.OrchestratorProfile.IsAzureCNI()) { a.OrchestratorProfile.KubernetesConfig.Addons[aN] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[aN], DefaultAzureCNINetworkMonitorAddonsConfig) } + aNP := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, AzureNetworkPolicyAddonName) + if a.OrchestratorProfile.KubernetesConfig.Addons[aNP].IsEnabled(a.OrchestratorProfile.KubernetesConfig.NetworkPlugin == NetworkPluginAzure && a.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure) { + a.OrchestratorProfile.KubernetesConfig.Addons[aNP] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[aNP], DefaultAzureNetworkPolicyAddonsConfig) + } if o.KubernetesConfig.PrivateCluster == nil { o.KubernetesConfig.PrivateCluster = &api.PrivateCluster{} @@ -1105,6 +1127,8 @@ func enforceK8sAddonOverrides(addons []api.KubernetesAddon, o *api.OrchestratorP o.KubernetesConfig.Addons[m].Enabled = k8sVersionMetricsServerAddonEnabled(o) aN := getAddonsIndexByName(o.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) o.KubernetesConfig.Addons[aN].Enabled = azureCNINetworkMonitorAddonEnabled(o) + aNP := getAddonsIndexByName(o.KubernetesConfig.Addons, AzureNetworkPolicyAddonName) + o.KubernetesConfig.Addons[aNP].Enabled = azureNetworkPolicyAddonEnabled(o) } func k8sVersionMetricsServerAddonEnabled(o *api.OrchestratorProfile) *bool { @@ -1115,6 +1139,10 @@ func azureCNINetworkMonitorAddonEnabled(o *api.OrchestratorProfile) *bool { return helpers.PointerToBool(o.IsAzureCNI()) } +func azureNetworkPolicyAddonEnabled(o *api.OrchestratorProfile) *bool { + return helpers.PointerToBool(o.KubernetesConfig.NetworkPlugin == NetworkPluginAzure && o.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure) +} + func generateEtcdEncryptionKey() string { b := make([]byte, 32) rand.Read(b) diff --git a/pkg/api/const.go b/pkg/api/const.go index 2cba4f69a6..420468f097 100644 --- a/pkg/api/const.go +++ b/pkg/api/const.go @@ -125,12 +125,14 @@ const ( ContainerMonitoringAddonName = "container-monitoring" // DefaultPrivateClusterEnabled determines the acs-engine provided default for enabling kubernetes Private Cluster DefaultPrivateClusterEnabled = false - // NetworkPolicyAzure is the string expression for the deprecated NetworkPolicy usage pattern "azure" + // NetworkPolicyAzure is the string expression for Azure CNI network policy manager NetworkPolicyAzure = "azure" // NetworkPolicyNone is the string expression for the deprecated NetworkPolicy usage pattern "none" NetworkPolicyNone = "none" // NetworkPluginKubenet is the string expression for the kubenet NetworkPlugin config NetworkPluginKubenet = "kubenet" + // NetworkPluginAzure is thee string expression for Azure CNI plugin. 
+ NetworkPluginAzure = "azure" ) const ( diff --git a/pkg/api/convertertoapi.go b/pkg/api/convertertoapi.go index 1ec10d2392..66c1894308 100644 --- a/pkg/api/convertertoapi.go +++ b/pkg/api/convertertoapi.go @@ -718,7 +718,8 @@ func setVlabsKubernetesDefaults(vp *vlabs.Properties, api *OrchestratorProfile) if vp.OrchestratorProfile.KubernetesConfig != nil { // Included here for backwards compatibility with deprecated NetworkPolicy usage patterns - if vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure { + if vp.OrchestratorProfile.KubernetesConfig.NetworkPlugin == "" && + vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure { api.KubernetesConfig.NetworkPlugin = vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy api.KubernetesConfig.NetworkPolicy = "" // no-op but included for emphasis } else if vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyNone { diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index baeca617fd..148dc1c740 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -38,6 +38,10 @@ var ( networkPlugin: "azure", networkPolicy: "", }, + { + networkPlugin: "azure", + networkPolicy: "azure", + }, { networkPlugin: "kubenet", networkPolicy: "", @@ -994,7 +998,7 @@ func (k *KubernetesConfig) Validate(k8sVersion string, hasWindows bool) error { if e := k.validateNetworkPlugin(); e != nil { return e } - if e := k.validateNetworkPolicy(hasWindows); e != nil { + if e := k.validateNetworkPolicy(k8sVersion, hasWindows); e != nil { return e } if e := k.validateNetworkPluginPlusPolicy(); e != nil { @@ -1023,9 +1027,10 @@ func (k *KubernetesConfig) validateNetworkPlugin() error { return nil } -func (k *KubernetesConfig) validateNetworkPolicy(hasWindows bool) error { +func (k *KubernetesConfig) validateNetworkPolicy(k8sVersion string, hasWindows bool) error { networkPolicy := k.NetworkPolicy + networkPlugin := k.NetworkPlugin // Check NetworkPolicy has a valid value. valid := false @@ -1039,6 +1044,10 @@ func (k *KubernetesConfig) validateNetworkPolicy(hasWindows bool) error { return fmt.Errorf("unknown networkPolicy '%s' specified", networkPolicy) } + if networkPolicy == "azure" && networkPlugin == "azure" && !common.IsKubernetesVersionGe(k8sVersion, "1.8.0") { + return fmt.Errorf("networkPolicy azure requires kubernetes version of 1.8 or higher") + } + // Temporary safety check, to be removed when Windows support is added. 
if (networkPolicy == "calico" || networkPolicy == "cilium" || networkPolicy == "flannel") && hasWindows { return fmt.Errorf("networkPolicy '%s' is not supporting windows agents", networkPolicy) diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index d67e3a232c..7a23cdb7b4 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -550,40 +550,51 @@ func Test_Properties_ValidateNetworkPolicy(t *testing.T) { p.OrchestratorProfile = &OrchestratorProfile{} p.OrchestratorProfile.OrchestratorType = Kubernetes + k8sVersion := "1.8.0" for _, policy := range NetworkPolicyValues { p.OrchestratorProfile.KubernetesConfig = &KubernetesConfig{} p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = policy - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(false); err != nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, false); err != nil { t.Errorf( - "should not error on networkPolicy=\"%s\"", + "should not error on networkPolicy=\"%s\" on k8sVersion=\"%s\"", policy, + k8sVersion, ) } } p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "not-existing" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(false); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, false); err == nil { t.Errorf( "should error on invalid networkPolicy", ) } + k8sVersion = "1.7.9" + p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "azure" + p.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "azure" + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, false); err == nil { + t.Errorf( + "should error on azure networkPolicy + azure networkPlugin with k8s version < 1.8.0", + ) + } + p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "calico" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(true); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, true); err == nil { t.Errorf( "should error on calico for windows clusters", ) } p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "cilium" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(true); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, true); err == nil { t.Errorf( "should error on cilium for windows clusters", ) } p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "flannel" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(true); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, true); err == nil { t.Errorf( "should error on flannel for windows clusters", ) @@ -644,10 +655,6 @@ func Test_Properties_ValidateNetworkPluginPlusPolicy(t *testing.T) { networkPlugin: "azure", networkPolicy: "flannel", }, - { - networkPlugin: "azure", - networkPolicy: "azure", - }, { networkPlugin: "kubenet", networkPolicy: "none", diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 44f35b534c..1dbb7152ef 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -690,7 +690,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu Describe("with calico network policy enabled", func() { It("should apply a network policy and deny outbound internet access to nginx pod", func() { - if eng.HasNetworkPolicy("calico") { + if 
eng.HasNetworkPolicy("calico") || eng.HasNetworkPolicy("azure") { namespace := "default" By("Creating a nginx deployment") r := rand.New(rand.NewSource(time.Now().UnixNano())) From 45b5f175c2b769a5ec945a1fd104faeef356507d Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 28 Jun 2018 14:38:30 -0700 Subject: [PATCH 43/74] rev versions for k8s 1.11 (#3374) --- pkg/acsengine/k8s_versions.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/acsengine/k8s_versions.go b/pkg/acsengine/k8s_versions.go index 482c06fbb9..279321d414 100644 --- a/pkg/acsengine/k8s_versions.go +++ b/pkg/acsengine/k8s_versions.go @@ -13,14 +13,14 @@ var k8sComponentVersions = map[string]map[string]string{ "dashboard": "kubernetes-dashboard-amd64:v1.8.3", "exechealthz": "exechealthz-amd64:1.2", "addon-resizer": "addon-resizer:1.8.1", - "heapster": "heapster-amd64:v1.5.1", + "heapster": "heapster-amd64:v1.5.3", "metrics-server": "metrics-server-amd64:v0.2.1", - "kube-dns": "k8s-dns-kube-dns-amd64:1.14.8", + "kube-dns": "k8s-dns-kube-dns-amd64:1.14.10", "addon-manager": "kube-addon-manager-amd64:v8.6", - "dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.10", "pause": "pause-amd64:3.1", "tiller": "tiller:v2.8.1", - "rescheduler": "rescheduler:v0.3.1", + "rescheduler": "rescheduler:v0.4.0", "aci-connector": "virtual-kubelet:latest", ContainerMonitoringAddonName: "oms:ciprod05082018", AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4", From 6f6383535424ddc2716b39450ff10fb2cb55b231 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 2 Jul 2018 10:19:05 -0700 Subject: [PATCH 44/74] enable upgrade to 1.11 (#3397) --- pkg/operations/kubernetesupgrade/upgradecluster.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/operations/kubernetesupgrade/upgradecluster.go b/pkg/operations/kubernetesupgrade/upgradecluster.go index fc85767268..6af732f3cb 100644 --- a/pkg/operations/kubernetesupgrade/upgradecluster.go +++ b/pkg/operations/kubernetesupgrade/upgradecluster.go @@ -123,6 +123,11 @@ func (uc *UpgradeCluster) UpgradeCluster(subscriptionID uuid.UUID, kubeConfig, r upgrader110.Init(uc.Translator, uc.Logger, uc.ClusterTopology, uc.Client, kubeConfig, uc.StepTimeout, acsengineVersion) upgrader = upgrader110 + case strings.HasPrefix(upgradeVersion, "1.11."): + upgrader111 := &Upgrader{} + upgrader111.Init(uc.Translator, uc.Logger, uc.ClusterTopology, uc.Client, kubeConfig, uc.StepTimeout, acsengineVersion) + upgrader = upgrader111 + default: return uc.Translator.Errorf("Upgrade to Kubernetes version %s is not supported", upgradeVersion) } From b8174ee73996c4d0a1cb330bce73e5d66b04dcdc Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Mon, 2 Jul 2018 10:49:53 -0700 Subject: [PATCH 45/74] added support for k8s 1.9.9 (#3388) --- pkg/api/common/versions.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/api/common/versions.go b/pkg/api/common/versions.go index 5366f27dbf..c41cf7be89 100644 --- a/pkg/api/common/versions.go +++ b/pkg/api/common/versions.go @@ -50,6 +50,7 @@ var AllKubernetesSupportedVersions = map[string]bool{ "1.9.6": true, "1.9.7": true, "1.9.8": true, + "1.9.9": true, "1.10.0-beta.2": true, "1.10.0-beta.4": true, "1.10.0-rc.1": true, From ab78b39ff40ac0c0763c6d326c66cbc51ba4f6d1 Mon Sep 17 00:00:00 2001 From: Stuart Leeks Date: Mon, 2 Jul 2018 20:19:28 +0100 Subject: [PATCH 46/74] Add bash completion command (#3392) --- cmd/root.go | 19 +++++++++++++++++++ docs/acsengine.md | 8 ++++++++ 2 files changed, 27 
insertions(+) diff --git a/cmd/root.go b/cmd/root.go index 50b4f23816..e1d6af7f09 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "os" "github.com/Azure/acs-engine/pkg/armhelpers" "github.com/Azure/go-autorest/autorest/azure" @@ -45,6 +46,24 @@ func NewRootCmd() *cobra.Command { rootCmd.AddCommand(newScaleCmd()) rootCmd.AddCommand(newDcosUpgradeCmd()) + var completionCmd = &cobra.Command{ + Use: "completion", + Short: "Generates bash completion scripts", + Long: `To load completion run + + source <(acs-engine completion) + + To configure your bash shell to load completions for each session, add this to your bashrc + + # ~/.bashrc or ~/.profile + source <(acs-engine completion) + `, + Run: func(cmd *cobra.Command, args []string) { + rootCmd.GenBashCompletion(os.Stdout) + }, + } + rootCmd.AddCommand(completionCmd) + return rootCmd } diff --git a/docs/acsengine.md b/docs/acsengine.md index 89ab53bb14..27c5ccd331 100644 --- a/docs/acsengine.md +++ b/docs/acsengine.md @@ -12,6 +12,14 @@ You can also choose to install acs-engine using [gofish](https://www.gofi.sh/#ab If you would prefer to build `acs-engine` from source or you are interested in contributing to `acs-engine` see [building from source](#build-acs-engine-from-source) below. +## Completion + +`acs-engine` supports bash completion. To enable this, add the following to your `.bashrc` or `~/.profile` + +```bash +source <(acs-engine completion) +``` + ## Usage `acs-engine` reads a JSON [cluster definition](./clusterdefinition.md) and generates a number of files that may be submitted to Azure Resource Manager (ARM). The generated files include: From 570b6de5dabea7a5219364b88481b20ea3edbf9b Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Mon, 2 Jul 2018 13:30:18 -0700 Subject: [PATCH 47/74] fix table (#3400) --- docs/clusterdefinition.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 0098e0d460..3d76cd3704 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -54,8 +54,7 @@ Here are the valid values for the orchestrator types: | kubeletConfig | no | Configure various runtime configuration for kubelet. See `kubeletConfig` [below](#feat-kubelet-config) | | kubernetesImageBase | no | Specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s-gcrio.azureedge.net/` | | networkPlugin | no | Specifies the network plugin implementation for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience
`"kubenet"` for k8s software networking implementation.
`"flannel"` for using CoreOS Flannel
`"cilium"` for using the default Cilium CNI IPAM | - -|networkPolicy|no|Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`"calico"` for Calico network policy.
`"cilium"` for cilium network policy (Lin), and `"azure"` (experimental) for Azure CNI-compliant network policy (note: Azure CNI-compliant network policy requires explicit `"networkPlugin": "azure"` configuration as well).
See [network policy examples](../examples/networkpolicy) for more information. | +| networkPolicy | no | Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`"calico"` for Calico network policy.
`"cilium"` for cilium network policy (Lin), and `"azure"` (experimental) for Azure CNI-compliant network policy (note: Azure CNI-compliant network policy requires explicit `"networkPlugin": "azure"` configuration as well).
See [network policy examples](../examples/networkpolicy) for more information. | | privateCluster | no | Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster). | | schedulerConfig | no | Configure various runtime configuration for scheduler. See `schedulerConfig` [below](#feat-scheduler-config) | | serviceCidr | no | IP range for Service IPs, Default is "10.0.0.0/16". This range is never routed outside of a node so does not need to lie within clusterSubnet or the VNET | From 40cda4d884bad3cd934734dc6898389ccea25559 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Mon, 2 Jul 2018 13:35:50 -0700 Subject: [PATCH 48/74] Use errgroup instead of counting goroutines (#3387) This is much safer for future modification. --- glide.lock | 8 +- glide.yaml | 11 +- pkg/acsengine/pki.go | 55 ++- vendor/golang.org/x/sync/AUTHORS | 3 + vendor/golang.org/x/sync/CONTRIBUTING.md | 26 ++ vendor/golang.org/x/sync/CONTRIBUTORS | 3 + vendor/golang.org/x/sync/LICENSE | 27 ++ vendor/golang.org/x/sync/PATENTS | 22 ++ vendor/golang.org/x/sync/README.md | 18 + vendor/golang.org/x/sync/codereview.cfg | 1 + vendor/golang.org/x/sync/errgroup/errgroup.go | 67 ++++ .../errgroup/errgroup_example_md5all_test.go | 101 +++++ .../x/sync/errgroup/errgroup_test.go | 176 +++++++++ .../golang.org/x/sync/semaphore/semaphore.go | 131 ++++++ .../x/sync/semaphore/semaphore_bench_test.go | 131 ++++++ .../sync/semaphore/semaphore_example_test.go | 84 ++++ .../x/sync/semaphore/semaphore_test.go | 171 ++++++++ .../x/sync/singleflight/singleflight.go | 111 ++++++ .../x/sync/singleflight/singleflight_test.go | 87 ++++ vendor/golang.org/x/sync/syncmap/map.go | 372 ++++++++++++++++++ .../x/sync/syncmap/map_bench_test.go | 216 ++++++++++ .../x/sync/syncmap/map_reference_test.go | 151 +++++++ vendor/golang.org/x/sync/syncmap/map_test.go | 172 ++++++++ 23 files changed, 2107 insertions(+), 37 deletions(-) create mode 100644 vendor/golang.org/x/sync/AUTHORS create mode 100644 vendor/golang.org/x/sync/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/README.md create mode 100644 vendor/golang.org/x/sync/codereview.cfg create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup_test.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore_example_test.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore_test.go create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight_test.go create mode 100644 vendor/golang.org/x/sync/syncmap/map.go create mode 100644 vendor/golang.org/x/sync/syncmap/map_bench_test.go create mode 100644 vendor/golang.org/x/sync/syncmap/map_reference_test.go create mode 100644 vendor/golang.org/x/sync/syncmap/map_test.go diff --git a/glide.lock b/glide.lock index 43144c197c..ffff64a952 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 6fa6f282ecddc833638528e7142991e81fbe33987514e2da1ece181a6470c568 -updated: 2018-06-08T11:35:55.674334-07:00 +hash: 
90d55cc9eec600a57a5c52688b814b2daca46d5b4546fcd8a98b1d64d91422bd +updated: 2018-06-29T15:54:03.527724072-07:00 imports: - name: github.com/alexcesaro/statsd version: 7fea3f0d2fab1ad973e641e51dba45443a311a90 @@ -166,6 +166,10 @@ imports: - http2/hpack - idna - lex/httplex +- name: golang.org/x/sync + version: 1d60e4601c6fd243af51cc01ddf169918a5407ca + subpackages: + - errgroup - name: golang.org/x/sys version: 95c6576299259db960f6c5b9b69ea52422860fce subpackages: diff --git a/glide.yaml b/glide.yaml index e1a9351c7c..74031e5f65 100644 --- a/glide.yaml +++ b/glide.yaml @@ -58,16 +58,17 @@ import: version: 0841753fc26e934b715ca7a83dced5bcb721245a - package: k8s.io/client-go version: ~7.0.0 -# This is the same version client-go is pinned to - package: k8s.io/api version: 73d903622b7391f3312dcbac6483fed484e185f8 - package: github.com/Jeffail/gabs - version: 1.0 + version: "1.0" - package: github.com/rjtsdl/conform version: 1.2.1 - package: github.com/etgryphon/stringUp +- package: golang.org/x/sync + subpackages: + - errgroup testImport: -# glide isn't able to mutually reconcile pinned versions of these deps - package: github.com/onsi/gomega - package: github.com/onsi/ginkgo - package: github.com/kelseyhightower/envconfig @@ -77,4 +78,6 @@ testImport: version: ~7.0.0 - package: k8s.io/api version: 73d903622b7391f3312dcbac6483fed484e185f8 -- package: github.com/influxdata/influxdb/client/v2 +- package: github.com/influxdata/influxdb + subpackages: + - client/v2 diff --git a/pkg/acsengine/pki.go b/pkg/acsengine/pki.go index 83883d7e5c..a650cb4204 100644 --- a/pkg/acsengine/pki.go +++ b/pkg/acsengine/pki.go @@ -13,6 +13,8 @@ import ( "net" "time" + "golang.org/x/sync/errgroup" + log "github.com/sirupsen/logrus" ) @@ -58,7 +60,7 @@ func CreatePki(extraFQDNs []string, extraIPs []net.IP, clusterDomain string, caP etcdClientPrivateKey *rsa.PrivateKey etcdPeerCertPairs []*PkiKeyCertPair ) - errors := make(chan error) + var group errgroup.Group var err error caCertificate, err = pemToCertificate(caPair.CertificatePem) @@ -70,62 +72,53 @@ func CreatePki(extraFQDNs []string, extraIPs []net.IP, clusterDomain string, caP return nil, nil, nil, nil, nil, nil, err } - go func() { - var err error + group.Go(func() (err error) { apiServerCertificate, apiServerPrivateKey, err = createCertificate("apiserver", caCertificate, caPrivateKey, false, true, extraFQDNs, extraIPs, nil) - errors <- err - }() + return err + }) - go func() { - var err error + group.Go(func() (err error) { organization := make([]string, 1) organization[0] = "system:masters" clientCertificate, clientPrivateKey, err = createCertificate("client", caCertificate, caPrivateKey, false, false, nil, nil, organization) - errors <- err - }() + return err + }) - go func() { - var err error + group.Go(func() (err error) { organization := make([]string, 1) organization[0] = "system:masters" kubeConfigCertificate, kubeConfigPrivateKey, err = createCertificate("client", caCertificate, caPrivateKey, false, false, nil, nil, organization) - errors <- err - }() + return err + }) - go func() { - var err error + group.Go(func() (err error) { ip := net.ParseIP("127.0.0.1").To4() peerIPs := append(extraIPs, ip) etcdServerCertificate, etcdServerPrivateKey, err = createCertificate("etcdserver", caCertificate, caPrivateKey, true, true, nil, peerIPs, nil) - errors <- err - }() + return err + }) - go func() { - var err error + group.Go(func() (err error) { ip := net.ParseIP("127.0.0.1").To4() 
peerIPs := append(extraIPs, ip) etcdClientCertificate, etcdClientPrivateKey, err = createCertificate("etcdclient", caCertificate, caPrivateKey, true, false, nil, peerIPs, nil) - errors <- err - }() + return err + }) etcdPeerCertPairs = make([]*PkiKeyCertPair, masterCount) for i := 0; i < masterCount; i++ { - go func(i int) { - var err error + i := i + group.Go(func() (err error) { ip := net.ParseIP("127.0.0.1").To4() peerIPs := append(extraIPs, ip) etcdPeerCertificate, etcdPeerPrivateKey, err := createCertificate("etcdpeer", caCertificate, caPrivateKey, true, false, nil, peerIPs, nil) etcdPeerCertPairs[i] = &PkiKeyCertPair{CertificatePem: string(certificateToPem(etcdPeerCertificate.Raw)), PrivateKeyPem: string(privateKeyToPem(etcdPeerPrivateKey))} - errors <- err - }(i) + return err + }) } - e := make([]error, (masterCount + 5)) - for i := 0; i < len(e); i++ { - e[i] = <-errors - if e[i] != nil { - return nil, nil, nil, nil, nil, nil, e[i] - } + if err := group.Wait(); err != nil { + return nil, nil, nil, nil, nil, nil, err } return &PkiKeyCertPair{CertificatePem: string(certificateToPem(apiServerCertificate.Raw)), PrivateKeyPem: string(privateKeyToPem(apiServerPrivateKey))}, diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTING.md b/vendor/golang.org/x/sync/CONTRIBUTING.md new file mode 100644 index 0000000000..d0485e887a --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md new file mode 100644 index 0000000000..1f8436cc9c --- /dev/null +++ b/vendor/golang.org/x/sync/README.md @@ -0,0 +1,18 @@ +# Go Sync + +This repository provides Go concurrency primitives in addition to the +ones provided by the language and "sync" and "sync/atomic" packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sync`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`. 
+ +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sync repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sync/codereview.cfg b/vendor/golang.org/x/sync/codereview.cfg new file mode 100644 index 0000000000..3f8b14b64e --- /dev/null +++ b/vendor/golang.org/x/sync/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..533438d91c --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "sync" + + "golang.org/x/net/context" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go b/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go new file mode 100644 index 0000000000..714b5aea77 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errgroup_test + +import ( + "crypto/md5" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +// Pipeline demonstrates the use of a Group to implement a multi-stage +// pipeline: a version of the MD5All function with bounded parallelism from +// https://blog.golang.org/pipelines. 
+func ExampleGroup_pipeline() { + m, err := MD5All(context.Background(), ".") + if err != nil { + log.Fatal(err) + } + + for k, sum := range m { + fmt.Printf("%s:\t%x\n", k, sum) + } +} + +type result struct { + path string + sum [md5.Size]byte +} + +// MD5All reads all the files in the file tree rooted at root and returns a map +// from file path to the MD5 sum of the file's contents. If the directory walk +// fails or any read operation fails, MD5All returns an error. +func MD5All(ctx context.Context, root string) (map[string][md5.Size]byte, error) { + // ctx is canceled when g.Wait() returns. When this version of MD5All returns + // - even in case of error! - we know that all of the goroutines have finished + // and the memory they were using can be garbage-collected. + g, ctx := errgroup.WithContext(ctx) + paths := make(chan string) + + g.Go(func() error { + defer close(paths) + return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return nil + } + select { + case paths <- path: + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }) + + // Start a fixed number of goroutines to read and digest files. + c := make(chan result) + const numDigesters = 20 + for i := 0; i < numDigesters; i++ { + g.Go(func() error { + for path := range paths { + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + select { + case c <- result{path, md5.Sum(data)}: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + }) + } + go func() { + g.Wait() + close(c) + }() + + m := make(map[string][md5.Size]byte) + for r := range c { + m[r.path] = r.sum + } + // Check whether any of the goroutines failed. Since g is accumulating the + // errors, we don't need to send them (or check for them) in the individual + // results sent on the channel. + if err := g.Wait(); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup_test.go b/vendor/golang.org/x/sync/errgroup/errgroup_test.go new file mode 100644 index 0000000000..6a9696efc6 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup_test.go @@ -0,0 +1,176 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errgroup_test + +import ( + "errors" + "fmt" + "net/http" + "os" + "testing" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +var ( + Web = fakeSearch("web") + Image = fakeSearch("image") + Video = fakeSearch("video") +) + +type Result string +type Search func(ctx context.Context, query string) (Result, error) + +func fakeSearch(kind string) Search { + return func(_ context.Context, query string) (Result, error) { + return Result(fmt.Sprintf("%s result for %q", kind, query)), nil + } +} + +// JustErrors illustrates the use of a Group in place of a sync.WaitGroup to +// simplify goroutine counting and error handling. This example is derived from +// the sync.WaitGroup example at https://golang.org/pkg/sync/#example_WaitGroup. +func ExampleGroup_justErrors() { + var g errgroup.Group + var urls = []string{ + "http://www.golang.org/", + "http://www.google.com/", + "http://www.somestupidname.com/", + } + for _, url := range urls { + // Launch a goroutine to fetch the URL. + url := url // https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + // Fetch the URL. 
+ resp, err := http.Get(url) + if err == nil { + resp.Body.Close() + } + return err + }) + } + // Wait for all HTTP fetches to complete. + if err := g.Wait(); err == nil { + fmt.Println("Successfully fetched all URLs.") + } +} + +// Parallel illustrates the use of a Group for synchronizing a simple parallel +// task: the "Google Search 2.0" function from +// https://talks.golang.org/2012/concurrency.slide#46, augmented with a Context +// and error-handling. +func ExampleGroup_parallel() { + Google := func(ctx context.Context, query string) ([]Result, error) { + g, ctx := errgroup.WithContext(ctx) + + searches := []Search{Web, Image, Video} + results := make([]Result, len(searches)) + for i, search := range searches { + i, search := i, search // https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + result, err := search(ctx, query) + if err == nil { + results[i] = result + } + return err + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + return results, nil + } + + results, err := Google(context.Background(), "golang") + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } + for _, result := range results { + fmt.Println(result) + } + + // Output: + // web result for "golang" + // image result for "golang" + // video result for "golang" +} + +func TestZeroGroup(t *testing.T) { + err1 := errors.New("errgroup_test: 1") + err2 := errors.New("errgroup_test: 2") + + cases := []struct { + errs []error + }{ + {errs: []error{}}, + {errs: []error{nil}}, + {errs: []error{err1}}, + {errs: []error{err1, nil}}, + {errs: []error{err1, nil, err2}}, + } + + for _, tc := range cases { + var g errgroup.Group + + var firstErr error + for i, err := range tc.errs { + err := err + g.Go(func() error { return err }) + + if firstErr == nil && err != nil { + firstErr = err + } + + if gErr := g.Wait(); gErr != firstErr { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "g.Wait() = %v; want %v", + g, tc.errs[:i+1], err, firstErr) + } + } + } +} + +func TestWithContext(t *testing.T) { + errDoom := errors.New("group_test: doomed") + + cases := []struct { + errs []error + want error + }{ + {want: nil}, + {errs: []error{nil}, want: nil}, + {errs: []error{errDoom}, want: errDoom}, + {errs: []error{errDoom, nil}, want: errDoom}, + } + + for _, tc := range cases { + g, ctx := errgroup.WithContext(context.Background()) + + for _, err := range tc.errs { + err := err + g.Go(func() error { return err }) + } + + if err := g.Wait(); err != tc.want { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "g.Wait() = %v; want %v", + g, tc.errs, err, tc.want) + } + + canceled := false + select { + case <-ctx.Done(): + canceled = true + default: + } + if !canceled { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "ctx.Done() was not closed", + g, tc.errs) + } + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 0000000000..e9d2d79a97 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "sync" + + // Use the old context because packages that depend on this one + // (e.g. 
cloud.google.com/go/...) must run on Go 1.6. + // TODO(jba): update to "context" when possible. + "golang.org/x/net/context" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking only until ctx +// is done. On success, returns nil. On failure, returns ctx.Err() and leaves +// the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: bad release") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go new file mode 100644 index 0000000000..1e3ab75f5d --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7 + +package semaphore_test + +import ( + "fmt" + "testing" + + "golang.org/x/net/context" + "golang.org/x/sync/semaphore" +) + +// weighted is an interface matching a subset of *Weighted. It allows +// alternate implementations for testing and benchmarking. +type weighted interface { + Acquire(context.Context, int64) error + TryAcquire(int64) bool + Release(int64) +} + +// semChan implements Weighted using a channel for +// comparing against the condition variable-based implementation. +type semChan chan struct{} + +func newSemChan(n int64) semChan { + return semChan(make(chan struct{}, n)) +} + +func (s semChan) Acquire(_ context.Context, n int64) error { + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return nil +} + +func (s semChan) TryAcquire(n int64) bool { + if int64(len(s))+n > int64(cap(s)) { + return false + } + + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return true +} + +func (s semChan) Release(n int64) { + for i := int64(0); i < n; i++ { + <-s + } +} + +// acquireN calls Acquire(size) on sem N times and then calls Release(size) N times. +func acquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + sem.Acquire(context.Background(), size) + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +// tryAcquireN calls TryAcquire(size) on sem N times and then calls Release(size) N times. +func tryAcquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + if !sem.TryAcquire(size) { + b.Fatalf("TryAcquire(%v) = false, want true", size) + } + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +func BenchmarkNewSeq(b *testing.B) { + for _, cap := range []int64{1, 128} { + b.Run(fmt.Sprintf("Weighted-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = semaphore.NewWeighted(cap) + } + }) + b.Run(fmt.Sprintf("semChan-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = newSemChan(cap) + } + }) + } +} + +func BenchmarkAcquireSeq(b *testing.B) { + for _, c := range []struct { + cap, size int64 + N int + }{ + {1, 1, 1}, + {2, 1, 1}, + {16, 1, 1}, + {128, 1, 1}, + {2, 2, 1}, + {16, 2, 8}, + {128, 2, 64}, + {2, 1, 2}, + {16, 8, 2}, + {128, 64, 2}, + } { + for _, w := range []struct { + name string + w weighted + }{ + {"Weighted", semaphore.NewWeighted(c.cap)}, + {"semChan", newSemChan(c.cap)}, + } { + b.Run(fmt.Sprintf("%s-acquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + acquireN(b, w.w, c.size, c.N) + }) + b.Run(fmt.Sprintf("%s-tryAcquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + tryAcquireN(b, w.w, c.size, c.N) + }) + } + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go new file mode 100644 index 0000000000..e75cd79f5b --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go @@ -0,0 +1,84 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package semaphore_test + +import ( + "context" + "fmt" + "log" + "runtime" + + "golang.org/x/sync/semaphore" +) + +// Example_workerPool demonstrates how to use a semaphore to limit the number of +// goroutines working on parallel tasks. 
+// +// This use of a semaphore mimics a typical “worker pool” pattern, but without +// the need to explicitly shut down idle workers when the work is done. +func Example_workerPool() { + ctx := context.TODO() + + var ( + maxWorkers = runtime.GOMAXPROCS(0) + sem = semaphore.NewWeighted(int64(maxWorkers)) + out = make([]int, 32) + ) + + // Compute the output using up to maxWorkers goroutines at a time. + for i := range out { + // When maxWorkers goroutines are in flight, Acquire blocks until one of the + // workers finishes. + if err := sem.Acquire(ctx, 1); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + break + } + + go func(i int) { + defer sem.Release(1) + out[i] = collatzSteps(i + 1) + }(i) + } + + // Acquire all of the tokens to wait for any remaining workers to finish. + // + // If you are already waiting for the workers by some other means (such as an + // errgroup.Group), you can omit this final Acquire call. + if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + } + + fmt.Println(out) + + // Output: + // [0 1 7 2 5 8 16 3 19 6 14 9 9 17 17 4 12 20 20 7 7 15 15 10 23 10 111 18 18 18 106 5] +} + +// collatzSteps computes the number of steps to reach 1 under the Collatz +// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.) +func collatzSteps(n int) (steps int) { + if n <= 0 { + panic("nonpositive input") + } + + for ; n > 1; steps++ { + if steps < 0 { + panic("too many steps") + } + + if n%2 == 0 { + n /= 2 + continue + } + + const maxInt = int(^uint(0) >> 1) + if n > (maxInt-1)/3 { + panic("overflow") + } + n = 3*n + 1 + } + + return steps +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_test.go new file mode 100644 index 0000000000..2541b9068f --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_test.go @@ -0,0 +1,171 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package semaphore_test + +import ( + "math/rand" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +const maxSleep = 1 * time.Millisecond + +func HammerWeighted(sem *semaphore.Weighted, n int64, loops int) { + for i := 0; i < loops; i++ { + sem.Acquire(context.Background(), n) + time.Sleep(time.Duration(rand.Int63n(int64(maxSleep/time.Nanosecond))) * time.Nanosecond) + sem.Release(n) + } +} + +func TestWeighted(t *testing.T) { + t.Parallel() + + n := runtime.GOMAXPROCS(0) + loops := 10000 / n + sem := semaphore.NewWeighted(int64(n)) + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + i := i + go func() { + defer wg.Done() + HammerWeighted(sem, int64(i), loops) + }() + } + wg.Wait() +} + +func TestWeightedPanic(t *testing.T) { + t.Parallel() + + defer func() { + if recover() == nil { + t.Fatal("release of an unacquired weighted semaphore did not panic") + } + }() + w := semaphore.NewWeighted(1) + w.Release(1) +} + +func TestWeightedTryAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tries := []bool{} + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + tries = append(tries, sem.TryAcquire(1)) + + sem.Release(2) + + tries = append(tries, sem.TryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tryAcquire := func(n int64) bool { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + return sem.Acquire(ctx, n) == nil + } + + tries := []bool{} + sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + tries = append(tries, tryAcquire(1)) + + sem.Release(2) + + tries = append(tries, tryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedDoesntBlockIfTooBig(t *testing.T) { + t.Parallel() + + const n = 2 + sem := semaphore.NewWeighted(n) + { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go sem.Acquire(ctx, n+1) + } + + g, ctx := errgroup.WithContext(context.Background()) + for i := n * 3; i > 0; i-- { + g.Go(func() error { + err := sem.Acquire(ctx, 1) + if err == nil { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + } + return err + }) + } + if err := g.Wait(); err != nil { + t.Errorf("semaphore.NewWeighted(%v) failed to AcquireCtx(_, 1) with AcquireCtx(_, %v) pending", n, n+1) + } +} + +// TestLargeAcquireDoesntStarve times out if a large call to Acquire starves. +// Merely returning from the test function indicates success. 
+func TestLargeAcquireDoesntStarve(t *testing.T) { + t.Parallel() + + ctx := context.Background() + n := int64(runtime.GOMAXPROCS(0)) + sem := semaphore.NewWeighted(n) + running := true + + var wg sync.WaitGroup + wg.Add(int(n)) + for i := n; i > 0; i-- { + sem.Acquire(ctx, 1) + go func() { + defer func() { + sem.Release(1) + wg.Done() + }() + for running { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + sem.Acquire(ctx, 1) + } + }() + } + + sem.Acquire(ctx, n) + running = false + sem.Release(n) + wg.Wait() +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 0000000000..9a4f8d59e0 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,111 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + delete(g.m, key) + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight_test.go b/vendor/golang.org/x/sync/singleflight/singleflight_test.go new file mode 100644 index 0000000000..5e6f1b328e --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight_test.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package singleflight + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestDo(t *testing.T) { + var g Group + v, err, _ := g.Do("key", func() (interface{}, error) { + return "bar", nil + }) + if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { + t.Errorf("Do = %v; want %v", got, want) + } + if err != nil { + t.Errorf("Do error = %v", err) + } +} + +func TestDoErr(t *testing.T) { + var g Group + someErr := errors.New("Some error") + v, err, _ := g.Do("key", func() (interface{}, error) { + return nil, someErr + }) + if err != someErr { + t.Errorf("Do error = %v; want someErr %v", err, someErr) + } + if v != nil { + t.Errorf("unexpected non-nil value %#v", v) + } +} + +func TestDoDupSuppress(t *testing.T) { + var g Group + var wg1, wg2 sync.WaitGroup + c := make(chan string, 1) + var calls int32 + fn := func() (interface{}, error) { + if atomic.AddInt32(&calls, 1) == 1 { + // First invocation. + wg1.Done() + } + v := <-c + c <- v // pump; make available for any future calls + + time.Sleep(10 * time.Millisecond) // let more goroutines enter Do + + return v, nil + } + + const n = 10 + wg1.Add(1) + for i := 0; i < n; i++ { + wg1.Add(1) + wg2.Add(1) + go func() { + defer wg2.Done() + wg1.Done() + v, err, _ := g.Do("key", fn) + if err != nil { + t.Errorf("Do error: %v", err) + return + } + if s, _ := v.(string); s != "bar" { + t.Errorf("Do = %T %v; want %q", v, v, "bar") + } + }() + } + wg1.Wait() + // At least one goroutine is in fn now and all of them have at + // least reached the line before the Do. + c <- "bar" + wg2.Wait() + if got := atomic.LoadInt32(&calls); got <= 0 || got >= n { + t.Errorf("number of calls = %d; want over 0 and less than %d", got, n) + } +} diff --git a/vendor/golang.org/x/sync/syncmap/map.go b/vendor/golang.org/x/sync/syncmap/map.go new file mode 100644 index 0000000000..80e15847ef --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map.go @@ -0,0 +1,372 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package syncmap provides a concurrent map implementation. +// It is a prototype for a proposed addition to the sync package +// in the standard library. +// (https://golang.org/issue/18177) +package syncmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. +// It is safe for multiple goroutines to call a Map's methods concurrently. 
+// +// The zero Map is valid and empty. +// +// A Map must not be copied after first use. +type Map struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[interface{}]*entry + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly struct { + m map[interface{}]*entry + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = unsafe.Pointer(new(interface{})) + +// An entry is a slot in the map corresponding to a particular key. +type entry struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntry(i interface{}) *entry { + return &entry{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map) Load(key interface{}) (value interface{}, ok bool) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) 
+ read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return nil, false + } + return e.load() +} + +func (e *entry) load() (value interface{}, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return nil, false + } + return *(*interface{})(p), true +} + +// Store sets the value for a key. +func (m *Map) Store(key, value interface{}) { + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entry) tryStore(i *interface{}) bool { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + for { + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expunged, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entry) storeLocked(i *interface{}) { + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. 
+// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + } +} + +// Delete deletes the value for a key. +func (m *Map) Delete(key interface{}) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entry) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map) Range(f func(key, value interface{}) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnly) + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
+ m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if read.amended { + read = readOnly{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *Map) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnly{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *Map) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnly) + m.dirty = make(map[interface{}]*entry, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entry) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expunged +} diff --git a/vendor/golang.org/x/sync/syncmap/map_bench_test.go b/vendor/golang.org/x/sync/syncmap/map_bench_test.go new file mode 100644 index 0000000000..b279b4f749 --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_bench_test.go @@ -0,0 +1,216 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syncmap_test + +import ( + "fmt" + "reflect" + "sync/atomic" + "testing" + + "golang.org/x/sync/syncmap" +) + +type bench struct { + setup func(*testing.B, mapInterface) + perG func(b *testing.B, pb *testing.PB, i int, m mapInterface) +} + +func benchMap(b *testing.B, bench bench) { + for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &syncmap.Map{}} { + b.Run(fmt.Sprintf("%T", m), func(b *testing.B) { + m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface) + if bench.setup != nil { + bench.setup(b, m) + } + + b.ResetTimer() + + var i int64 + b.RunParallel(func(pb *testing.PB) { + id := int(atomic.AddInt64(&i, 1) - 1) + bench.perG(b, pb, id*b.N, m) + }) + }) + } +} + +func BenchmarkLoadMostlyHits(b *testing.B) { + const hits, misses = 1023, 1 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. + for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i % (hits + misses)) + } + }, + }) +} + +func BenchmarkLoadMostlyMisses(b *testing.B) { + const hits, misses = 1, 1023 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. + for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i % (hits + misses)) + } + }, + }) +} + +func BenchmarkLoadOrStoreBalanced(b *testing.B) { + const hits, misses = 128, 128 + + benchMap(b, bench{ + setup: func(b *testing.B, m mapInterface) { + if _, ok := m.(*DeepCopyMap); ok { + b.Skip("DeepCopyMap has quadratic running time.") + } + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. 
+ for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + j := i % (hits + misses) + if j < hits { + if _, ok := m.LoadOrStore(j, i); !ok { + b.Fatalf("unexpected miss for %v", j) + } + } else { + if v, loaded := m.LoadOrStore(i, i); loaded { + b.Fatalf("failed to store %v: existing value %v", i, v) + } + } + } + }, + }) +} + +func BenchmarkLoadOrStoreUnique(b *testing.B) { + benchMap(b, bench{ + setup: func(b *testing.B, m mapInterface) { + if _, ok := m.(*DeepCopyMap); ok { + b.Skip("DeepCopyMap has quadratic running time.") + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.LoadOrStore(i, i) + } + }, + }) +} + +func BenchmarkLoadOrStoreCollision(b *testing.B) { + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + m.LoadOrStore(0, 0) + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.LoadOrStore(0, 0) + } + }, + }) +} + +func BenchmarkRange(b *testing.B) { + const mapSize = 1 << 10 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < mapSize; i++ { + m.Store(i, i) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Range(func(_, _ interface{}) bool { return true }) + } + }, + }) +} + +// BenchmarkAdversarialAlloc tests performance when we store a new value +// immediately whenever the map is promoted to clean and otherwise load a +// unique, missing key. +// +// This forces the Load calls to always acquire the map's mutex. +func BenchmarkAdversarialAlloc(b *testing.B) { + benchMap(b, bench{ + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + var stores, loadsSinceStore int64 + for ; pb.Next(); i++ { + m.Load(i) + if loadsSinceStore++; loadsSinceStore > stores { + m.LoadOrStore(i, stores) + loadsSinceStore = 0 + stores++ + } + } + }, + }) +} + +// BenchmarkAdversarialDelete tests performance when we periodically delete +// one key and add a different one in a large map. +// +// This forces the Load calls to always acquire the map's mutex and periodically +// makes a full copy of the map despite changing only one entry. +func BenchmarkAdversarialDelete(b *testing.B) { + const mapSize = 1 << 10 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < mapSize; i++ { + m.Store(i, i) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i) + + if i%mapSize == 0 { + m.Range(func(k, _ interface{}) bool { + m.Delete(k) + return false + }) + m.Store(i, i) + } + } + }, + }) +} diff --git a/vendor/golang.org/x/sync/syncmap/map_reference_test.go b/vendor/golang.org/x/sync/syncmap/map_reference_test.go new file mode 100644 index 0000000000..923c51b70e --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_reference_test.go @@ -0,0 +1,151 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syncmap_test + +import ( + "sync" + "sync/atomic" +) + +// This file contains reference map implementations for unit-tests. + +// mapInterface is the interface Map implements. 
+type mapInterface interface { + Load(interface{}) (interface{}, bool) + Store(key, value interface{}) + LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) + Delete(interface{}) + Range(func(key, value interface{}) (shouldContinue bool)) +} + +// RWMutexMap is an implementation of mapInterface using a sync.RWMutex. +type RWMutexMap struct { + mu sync.RWMutex + dirty map[interface{}]interface{} +} + +func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) { + m.mu.RLock() + value, ok = m.dirty[key] + m.mu.RUnlock() + return +} + +func (m *RWMutexMap) Store(key, value interface{}) { + m.mu.Lock() + if m.dirty == nil { + m.dirty = make(map[interface{}]interface{}) + } + m.dirty[key] = value + m.mu.Unlock() +} + +func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + m.mu.Lock() + actual, loaded = m.dirty[key] + if !loaded { + actual = value + if m.dirty == nil { + m.dirty = make(map[interface{}]interface{}) + } + m.dirty[key] = value + } + m.mu.Unlock() + return actual, loaded +} + +func (m *RWMutexMap) Delete(key interface{}) { + m.mu.Lock() + delete(m.dirty, key) + m.mu.Unlock() +} + +func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) { + m.mu.RLock() + keys := make([]interface{}, 0, len(m.dirty)) + for k := range m.dirty { + keys = append(keys, k) + } + m.mu.RUnlock() + + for _, k := range keys { + v, ok := m.Load(k) + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +// DeepCopyMap is an implementation of mapInterface using a Mutex and +// atomic.Value. It makes deep copies of the map on every write to avoid +// acquiring the Mutex in Load. +type DeepCopyMap struct { + mu sync.Mutex + clean atomic.Value +} + +func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + value, ok = clean[key] + return value, ok +} + +func (m *DeepCopyMap) Store(key, value interface{}) { + m.mu.Lock() + dirty := m.dirty() + dirty[key] = value + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + actual, loaded = clean[key] + if loaded { + return actual, loaded + } + + m.mu.Lock() + // Reload clean in case it changed while we were waiting on m.mu. + clean, _ = m.clean.Load().(map[interface{}]interface{}) + actual, loaded = clean[key] + if !loaded { + dirty := m.dirty() + dirty[key] = value + actual = value + m.clean.Store(dirty) + } + m.mu.Unlock() + return actual, loaded +} + +func (m *DeepCopyMap) Delete(key interface{}) { + m.mu.Lock() + dirty := m.dirty() + delete(dirty, key) + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + for k, v := range clean { + if !f(k, v) { + break + } + } +} + +func (m *DeepCopyMap) dirty() map[interface{}]interface{} { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + dirty := make(map[interface{}]interface{}, len(clean)+1) + for k, v := range clean { + dirty[k] = v + } + return dirty +} diff --git a/vendor/golang.org/x/sync/syncmap/map_test.go b/vendor/golang.org/x/sync/syncmap/map_test.go new file mode 100644 index 0000000000..c883f176f5 --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_test.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syncmap_test + +import ( + "math/rand" + "reflect" + "runtime" + "sync" + "testing" + "testing/quick" + + "golang.org/x/sync/syncmap" +) + +type mapOp string + +const ( + opLoad = mapOp("Load") + opStore = mapOp("Store") + opLoadOrStore = mapOp("LoadOrStore") + opDelete = mapOp("Delete") +) + +var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opDelete} + +// mapCall is a quick.Generator for calls on mapInterface. +type mapCall struct { + op mapOp + k, v interface{} +} + +func (c mapCall) apply(m mapInterface) (interface{}, bool) { + switch c.op { + case opLoad: + return m.Load(c.k) + case opStore: + m.Store(c.k, c.v) + return nil, false + case opLoadOrStore: + return m.LoadOrStore(c.k, c.v) + case opDelete: + m.Delete(c.k) + return nil, false + default: + panic("invalid mapOp") + } +} + +type mapResult struct { + value interface{} + ok bool +} + +func randValue(r *rand.Rand) interface{} { + b := make([]byte, r.Intn(4)) + for i := range b { + b[i] = 'a' + byte(rand.Intn(26)) + } + return string(b) +} + +func (mapCall) Generate(r *rand.Rand, size int) reflect.Value { + c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)} + switch c.op { + case opStore, opLoadOrStore: + c.v = randValue(r) + } + return reflect.ValueOf(c) +} + +func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) { + for _, c := range calls { + v, ok := c.apply(m) + results = append(results, mapResult{v, ok}) + } + + final = make(map[interface{}]interface{}) + m.Range(func(k, v interface{}) bool { + final[k] = v + return true + }) + + return results, final +} + +func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(syncmap.Map), calls) +} + +func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(RWMutexMap), calls) +} + +func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(DeepCopyMap), calls) +} + +func TestMapMatchesRWMutex(t *testing.T) { + if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil { + t.Error(err) + } +} + +func TestMapMatchesDeepCopy(t *testing.T) { + if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil { + t.Error(err) + } +} + +func TestConcurrentRange(t *testing.T) { + const mapSize = 1 << 10 + + m := new(syncmap.Map) + for n := int64(1); n <= mapSize; n++ { + m.Store(n, int64(n)) + } + + done := make(chan struct{}) + var wg sync.WaitGroup + defer func() { + close(done) + wg.Wait() + }() + for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { + r := rand.New(rand.NewSource(g)) + wg.Add(1) + go func(g int64) { + defer wg.Done() + for i := int64(0); ; i++ { + select { + case <-done: + return + default: + } + for n := int64(1); n < mapSize; n++ { + if r.Int63n(mapSize) == 0 { + m.Store(n, n*i*g) + } else { + m.Load(n) + } + } + } + }(g) + } + + iters := 1 << 10 + if testing.Short() { + iters = 16 + } + for n := iters; n > 0; n-- { + seen := make(map[int64]bool, mapSize) + + m.Range(func(ki, vi interface{}) bool { + k, v := ki.(int64), vi.(int64) + if v%k != 0 { + t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) + } + if seen[k] { + t.Fatalf("Range visited key %v twice", k) + } + seen[k] = true + return true + }) + + if len(seen) != mapSize { + t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) + } + 
} +} From 20b95b4352186da066d4ba6b811855e4d614158d Mon Sep 17 00:00:00 2001 From: Mani Bindra Date: Tue, 3 Jul 2018 05:06:19 +0530 Subject: [PATCH 49/74] initial set of changes to fix issue (#3390) --- .../kubernetesmaster-audit-policy.yaml | 0 pkg/acsengine/addons.go | 10 +++++----- pkg/acsengine/defaults-apiserver.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) rename parts/k8s/{manifests => addons}/kubernetesmaster-audit-policy.yaml (100%) diff --git a/parts/k8s/manifests/kubernetesmaster-audit-policy.yaml b/parts/k8s/addons/kubernetesmaster-audit-policy.yaml similarity index 100% rename from parts/k8s/manifests/kubernetesmaster-audit-policy.yaml rename to parts/k8s/addons/kubernetesmaster-audit-policy.yaml diff --git a/pkg/acsengine/addons.go b/pkg/acsengine/addons.go index 811fc9c13b..745b0e9347 100644 --- a/pkg/acsengine/addons.go +++ b/pkg/acsengine/addons.go @@ -117,6 +117,11 @@ func kubernetesAddonSettingsInit(profile *api.Properties) []kubernetesFeatureSet "azure-cni-networkmonitor.yaml", profile.OrchestratorProfile.IsAzureCNI(), }, + { + "kubernetesmaster-audit-policy.yaml", + "audit-policy.yaml", + common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.8.0"), + }, } } @@ -142,11 +147,6 @@ func kubernetesManifestSettingsInit(profile *api.Properties) []kubernetesFeature "pod-security-policy.yaml", helpers.IsTrueBoolPointer(profile.OrchestratorProfile.KubernetesConfig.EnablePodSecurityPolicy), }, - { - "kubernetesmaster-audit-policy.yaml", - "audit-policy.yaml", - common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.8.0"), - }, { "kubernetesmaster-kube-apiserver.yaml", "kube-apiserver.yaml", diff --git a/pkg/acsengine/defaults-apiserver.go b/pkg/acsengine/defaults-apiserver.go index 34f278f1ee..b62e4d05b6 100644 --- a/pkg/acsengine/defaults-apiserver.go +++ b/pkg/acsengine/defaults-apiserver.go @@ -79,7 +79,7 @@ func setAPIServerConfig(cs *api.ContainerService) { // Audit Policy configuration if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.8.0") { - staticAPIServerConfig["--audit-policy-file"] = "/etc/kubernetes/manifests/audit-policy.yaml" + defaultAPIServerConfig["--audit-policy-file"] = "/etc/kubernetes/addons/audit-policy.yaml" } // RBAC configuration From 9993e58818f8355be96278c15006e3d6d2be235f Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Mon, 2 Jul 2018 17:01:54 -0700 Subject: [PATCH 50/74] Cleanup error usage (#3386) --- cmd/dcos-upgrade.go | 32 +- cmd/deploy.go | 44 +- cmd/generate.go | 14 +- cmd/root.go | 20 +- cmd/scale.go | 78 +-- cmd/upgrade.go | 27 +- glide.lock | 6 +- glide.yaml | 2 + pkg/acsengine/defaults.go | 3 +- pkg/acsengine/engine.go | 23 +- pkg/acsengine/template_generator.go | 6 +- pkg/acsengine/tenantid.go | 14 +- pkg/api/apiloader.go | 13 +- pkg/api/common/helper.go | 30 +- pkg/api/common/net.go | 5 +- pkg/api/orchestrators.go | 22 +- pkg/api/strictjson.go | 5 +- pkg/armhelpers/azureclient.go | 27 +- pkg/armhelpers/utils/util.go | 19 +- pkg/operations/cordondrainvm.go | 8 +- pkg/operations/deletevm.go | 3 +- vendor/github.com/pkg/errors/.gitignore | 24 + vendor/github.com/pkg/errors/.travis.yml | 11 + vendor/github.com/pkg/errors/LICENSE | 23 + vendor/github.com/pkg/errors/README.md | 52 ++ vendor/github.com/pkg/errors/appveyor.yml | 32 ++ vendor/github.com/pkg/errors/bench_test.go | 59 ++ vendor/github.com/pkg/errors/errors.go | 269 ++++++++++ vendor/github.com/pkg/errors/errors_test.go | 226 ++++++++ 
vendor/github.com/pkg/errors/example_test.go | 205 +++++++ vendor/github.com/pkg/errors/format_test.go | 535 +++++++++++++++++++ vendor/github.com/pkg/errors/stack.go | 178 ++++++ vendor/github.com/pkg/errors/stack_test.go | 292 ++++++++++ 33 files changed, 2113 insertions(+), 194 deletions(-) create mode 100644 vendor/github.com/pkg/errors/.gitignore create mode 100644 vendor/github.com/pkg/errors/.travis.yml create mode 100644 vendor/github.com/pkg/errors/LICENSE create mode 100644 vendor/github.com/pkg/errors/README.md create mode 100644 vendor/github.com/pkg/errors/appveyor.yml create mode 100644 vendor/github.com/pkg/errors/bench_test.go create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/errors_test.go create mode 100644 vendor/github.com/pkg/errors/example_test.go create mode 100644 vendor/github.com/pkg/errors/format_test.go create mode 100644 vendor/github.com/pkg/errors/stack.go create mode 100644 vendor/github.com/pkg/errors/stack_test.go diff --git a/cmd/dcos-upgrade.go b/cmd/dcos-upgrade.go index e250f25cd6..301f6a1efb 100644 --- a/cmd/dcos-upgrade.go +++ b/cmd/dcos-upgrade.go @@ -2,7 +2,6 @@ package cmd import ( "encoding/json" - "fmt" "io/ioutil" "os" "path" @@ -15,6 +14,7 @@ import ( "github.com/Azure/acs-engine/pkg/i18n" "github.com/Azure/acs-engine/pkg/operations/dcosupgrade" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -76,28 +76,28 @@ func (uc *dcosUpgradeCmd) validate(cmd *cobra.Command) error { uc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf("error loading translation files: %s", err.Error()) + return errors.Wrap(err, "error loading translation files") } if len(uc.resourceGroupName) == 0 { cmd.Usage() - return fmt.Errorf("--resource-group must be specified") + return errors.New("--resource-group must be specified") } if len(uc.location) == 0 { cmd.Usage() - return fmt.Errorf("--location must be specified") + return errors.New("--location must be specified") } uc.location = helpers.NormalizeAzureRegion(uc.location) if len(uc.upgradeVersion) == 0 { cmd.Usage() - return fmt.Errorf("--upgrade-version must be specified") + return errors.New("--upgrade-version must be specified") } if len(uc.deploymentDirectory) == 0 { cmd.Usage() - return fmt.Errorf("--deployment-dir must be specified") + return errors.New("--deployment-dir must be specified") } if len(uc.sshPrivateKeyPath) == 0 { @@ -105,11 +105,11 @@ func (uc *dcosUpgradeCmd) validate(cmd *cobra.Command) error { } if uc.sshPrivateKey, err = ioutil.ReadFile(uc.sshPrivateKeyPath); err != nil { cmd.Usage() - return fmt.Errorf("ssh-private-key-path must be specified: %s", err) + return errors.Wrap(err, "ssh-private-key-path must be specified") } if err = uc.authArgs.validateAuthArgs(); err != nil { - return fmt.Errorf("%s", err) + return err } return nil } @@ -118,19 +118,19 @@ func (uc *dcosUpgradeCmd) loadCluster(cmd *cobra.Command) error { var err error if uc.client, err = uc.authArgs.getClient(); err != nil { - return fmt.Errorf("Failed to get client: %s", err) + return errors.Wrap(err, "Failed to get client") } _, err = uc.client.EnsureResourceGroup(uc.resourceGroupName, uc.location, nil) if err != nil { - return fmt.Errorf("Error ensuring resource group: %s", err) + return 
errors.Wrap(err, "Error ensuring resource group") } // load apimodel from the deployment directory apiModelPath := path.Join(uc.deploymentDirectory, "apimodel.json") if _, err = os.Stat(apiModelPath); os.IsNotExist(err) { - return fmt.Errorf("specified api model does not exist (%s)", apiModelPath) + return errors.Errorf("specified api model does not exist (%s)", apiModelPath) } apiloader := &api.Apiloader{ @@ -140,24 +140,24 @@ func (uc *dcosUpgradeCmd) loadCluster(cmd *cobra.Command) error { } uc.containerService, uc.apiVersion, err = apiloader.LoadContainerServiceFromFile(apiModelPath, true, true, nil) if err != nil { - return fmt.Errorf("error parsing the api model: %s", err.Error()) + return errors.Wrap(err, "error parsing the api model") } uc.currentDcosVersion = uc.containerService.Properties.OrchestratorProfile.OrchestratorVersion if uc.currentDcosVersion == uc.upgradeVersion { - return fmt.Errorf("already running DCOS %s", uc.upgradeVersion) + return errors.Errorf("already running DCOS %s", uc.upgradeVersion) } if len(uc.containerService.Location) == 0 { uc.containerService.Location = uc.location } else if uc.containerService.Location != uc.location { - return fmt.Errorf("--location does not match api model location") + return errors.New("--location does not match api model location") } // get available upgrades for container service orchestratorInfo, err := api.GetOrchestratorVersionProfile(uc.containerService.Properties.OrchestratorProfile) if err != nil { - return fmt.Errorf("error getting list of available upgrades: %s", err.Error()) + return errors.Wrap(err, "error getting list of available upgrades") } // add the current version if upgrade has failed orchestratorInfo.Upgrades = append(orchestratorInfo.Upgrades, &api.OrchestratorProfile{ @@ -174,7 +174,7 @@ func (uc *dcosUpgradeCmd) loadCluster(cmd *cobra.Command) error { } } if !found { - return fmt.Errorf("upgrade to DCOS %s is not supported", uc.upgradeVersion) + return errors.Errorf("upgrade to DCOS %s is not supported", uc.upgradeVersion) } // Read name suffix to identify nodes in the resource group that belong diff --git a/cmd/deploy.go b/cmd/deploy.go index e47457bf77..e87ee9268f 100644 --- a/cmd/deploy.go +++ b/cmd/deploy.go @@ -1,7 +1,6 @@ package cmd import ( - "errors" "fmt" "io/ioutil" "math/rand" @@ -24,6 +23,7 @@ import ( "github.com/Azure/acs-engine/pkg/i18n" "github.com/Azure/azure-sdk-for-go/arm/graphrbac" "github.com/Azure/go-autorest/autorest/to" + "github.com/pkg/errors" ) const ( @@ -74,10 +74,10 @@ func newDeployCmd() *cobra.Command { Long: deployLongDescription, RunE: func(cmd *cobra.Command, args []string) error { if err := dc.validateArgs(cmd, args); err != nil { - log.Fatalf(fmt.Sprintf("error validating deployCmd: %s", err.Error())) + log.Fatalf("error validating deployCmd: %s", err.Error()) } if err := dc.mergeAPIModel(); err != nil { - log.Fatalf(fmt.Sprintf("error merging API model in deployCmd: %s", err.Error())) + log.Fatalf("error merging API model in deployCmd: %s", err.Error()) } if err := dc.loadAPIModel(cmd, args); err != nil { log.Fatalln("failed to load apimodel: %s", err.Error()) @@ -111,7 +111,7 @@ func (dc *deployCmd) validateArgs(cmd *cobra.Command, args []string) error { dc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf(fmt.Sprintf("error loading translation files: %s", err.Error())) + return errors.Wrap(err, "error loading translation files") } if dc.apimodelPath == "" { @@ -119,19 +119,19 @@ func (dc *deployCmd) 
validateArgs(cmd *cobra.Command, args []string) error { dc.apimodelPath = args[0] } else if len(args) > 1 { cmd.Usage() - return fmt.Errorf(fmt.Sprintf("too many arguments were provided to 'deploy'")) + return errors.New("too many arguments were provided to 'deploy'") } else { cmd.Usage() - return fmt.Errorf(fmt.Sprintf("--api-model was not supplied, nor was one specified as a positional argument")) + return errors.New("--api-model was not supplied, nor was one specified as a positional argument") } } if _, err := os.Stat(dc.apimodelPath); os.IsNotExist(err) { - return fmt.Errorf(fmt.Sprintf("specified api model does not exist (%s)", dc.apimodelPath)) + return errors.Errorf("specified api model does not exist (%s)", dc.apimodelPath) } if dc.location == "" { - return fmt.Errorf(fmt.Sprintf("--location must be specified")) + return errors.New("--location must be specified") } dc.location = helpers.NormalizeAzureRegion(dc.location) @@ -149,7 +149,7 @@ func (dc *deployCmd) mergeAPIModel() error { // overrides the api model and generates a new file dc.apimodelPath, err = transform.MergeValuesWithAPIModel(dc.apimodelPath, m) if err != nil { - return fmt.Errorf(fmt.Sprintf("error merging --set values with the api model: %s", err.Error())) + return errors.Wrap(err, "error merging --set values with the api model: %s") } log.Infoln(fmt.Sprintf("new api model file has been generated during merge: %s", dc.apimodelPath)) @@ -172,7 +172,7 @@ func (dc *deployCmd) loadAPIModel(cmd *cobra.Command, args []string) error { // do not validate when initially loading the apimodel, validation is done later after autofilling values dc.containerService, dc.apiVersion, err = apiloader.LoadContainerServiceFromFile(dc.apimodelPath, false, false, nil) if err != nil { - return fmt.Errorf(fmt.Sprintf("error parsing the api model: %s", err.Error())) + return errors.Wrap(err, "error parsing the api model") } if dc.outputDirectory == "" { @@ -190,10 +190,10 @@ func (dc *deployCmd) loadAPIModel(cmd *cobra.Command, args []string) error { if dc.caCertificatePath != "" { if caCertificateBytes, err = ioutil.ReadFile(dc.caCertificatePath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA certificate file: %s", err.Error())) + return errors.Wrap(err, "failed to read CA certificate file") } if caKeyBytes, err = ioutil.ReadFile(dc.caPrivateKeyPath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA private key file: %s", err.Error())) + return errors.Wrap(err, "failed to read CA private key file") } prop := dc.containerService.Properties @@ -207,16 +207,16 @@ func (dc *deployCmd) loadAPIModel(cmd *cobra.Command, args []string) error { if dc.containerService.Location == "" { dc.containerService.Location = dc.location } else if dc.containerService.Location != dc.location { - return fmt.Errorf(fmt.Sprintf("--location does not match api model location")) + return errors.New("--location does not match api model location") } if err = dc.authArgs.validateAuthArgs(); err != nil { - return fmt.Errorf("%s", err) + return err } dc.client, err = dc.authArgs.getClient() if err != nil { - return fmt.Errorf("failed to get client: %s", err.Error()) + return errors.Wrap(err, "failed to get client") } if err = autofillApimodel(dc); err != nil { @@ -239,11 +239,11 @@ func autofillApimodel(dc *deployCmd) error { } if dc.dnsPrefix != "" && dc.containerService.Properties.MasterProfile.DNSPrefix != "" { - return fmt.Errorf("invalid configuration: the apimodel masterProfile.dnsPrefix and --dns-prefix were both specified") + 
return errors.New("invalid configuration: the apimodel masterProfile.dnsPrefix and --dns-prefix were both specified") } if dc.containerService.Properties.MasterProfile.DNSPrefix == "" { if dc.dnsPrefix == "" { - return fmt.Errorf("apimodel: missing masterProfile.dnsPrefix and --dns-prefix was not specified") + return errors.New("apimodel: missing masterProfile.dnsPrefix and --dns-prefix was not specified") } log.Warnf("apimodel: missing masterProfile.dnsPrefix will use %q", dc.dnsPrefix) dc.containerService.Properties.MasterProfile.DNSPrefix = dc.dnsPrefix @@ -259,7 +259,7 @@ func autofillApimodel(dc *deployCmd) error { } if _, err := os.Stat(dc.outputDirectory); !dc.forceOverwrite && err == nil { - return fmt.Errorf("Output directory already exists and forceOverwrite flag is not set: %s", dc.outputDirectory) + return errors.Errorf("Output directory already exists and forceOverwrite flag is not set: %s", dc.outputDirectory) } if dc.resourceGroup == "" { @@ -267,7 +267,7 @@ func autofillApimodel(dc *deployCmd) error { log.Warnf("--resource-group was not specified. Using the DNS prefix from the apimodel as the resource group name: %s", dnsPrefix) dc.resourceGroup = dnsPrefix if dc.location == "" { - return fmt.Errorf("--resource-group was not specified. --location must be specified in case the resource group needs creation") + return errors.New("--resource-group was not specified. --location must be specified in case the resource group needs creation") } } @@ -279,7 +279,7 @@ func autofillApimodel(dc *deployCmd) error { } _, publicKey, err := acsengine.CreateSaveSSH(dc.containerService.Properties.LinuxProfile.AdminUsername, dc.outputDirectory, translator) if err != nil { - return fmt.Errorf("Failed to generate SSH Key: %s", err.Error()) + return errors.Wrap(err, "Failed to generate SSH Key") } dc.containerService.Properties.LinuxProfile.SSH.PublicKeys = []api.PublicKey{{KeyData: publicKey}} @@ -321,7 +321,7 @@ func autofillApimodel(dc *deployCmd) error { } applicationID, servicePrincipalObjectID, secret, err := dc.client.CreateApp(appName, appURL, replyURLs, requiredResourceAccess) if err != nil { - return fmt.Errorf("apimodel invalid: ServicePrincipalProfile was empty, and we failed to create valid credentials: %q", err) + return errors.Wrap(err, "apimodel invalid: ServicePrincipalProfile was empty, and we failed to create valid credentials") } log.Warnf("created application with applicationID (%s) and servicePrincipalObjectID (%s).", applicationID, servicePrincipalObjectID) @@ -329,7 +329,7 @@ func autofillApimodel(dc *deployCmd) error { err = dc.client.CreateRoleAssignmentSimple(dc.resourceGroup, servicePrincipalObjectID) if err != nil { - return fmt.Errorf("apimodel: could not create or assign ServicePrincipal: %q", err) + return errors.Wrap(err, "apimodel: could not create or assign ServicePrincipal") } diff --git a/cmd/generate.go b/cmd/generate.go index 694163696c..0867ff321a 100644 --- a/cmd/generate.go +++ b/cmd/generate.go @@ -1,7 +1,6 @@ package cmd import ( - "errors" "fmt" "io/ioutil" "os" @@ -12,6 +11,7 @@ import ( "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/acs-engine/pkg/i18n" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -80,7 +80,7 @@ func (gc *generateCmd) validate(cmd *cobra.Command, args []string) error { gc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf(fmt.Sprintf("error loading translation files: %s", 
err.Error())) + return errors.Wrap(err, "error loading translation files") } if gc.apimodelPath == "" { @@ -96,7 +96,7 @@ func (gc *generateCmd) validate(cmd *cobra.Command, args []string) error { } if _, err := os.Stat(gc.apimodelPath); os.IsNotExist(err) { - return fmt.Errorf(fmt.Sprintf("specified api model does not exist (%s)", gc.apimodelPath)) + return errors.Errorf("specified api model does not exist (%s)", gc.apimodelPath) } return nil @@ -113,7 +113,7 @@ func (gc *generateCmd) mergeAPIModel() error { // overrides the api model and generates a new file gc.apimodelPath, err = transform.MergeValuesWithAPIModel(gc.apimodelPath, m) if err != nil { - return fmt.Errorf(fmt.Sprintf("error merging --set values with the api model: %s", err.Error())) + return errors.Wrap(err, "error merging --set values with the api model") } log.Infoln(fmt.Sprintf("new api model file has been generated during merge: %s", gc.apimodelPath)) @@ -134,7 +134,7 @@ func (gc *generateCmd) loadAPIModel(cmd *cobra.Command, args []string) error { } gc.containerService, gc.apiVersion, err = apiloader.LoadContainerServiceFromFile(gc.apimodelPath, true, false, nil) if err != nil { - return fmt.Errorf(fmt.Sprintf("error parsing the api model: %s", err.Error())) + return errors.Wrap(err, "error parsing the api model") } if gc.outputDirectory == "" { @@ -152,10 +152,10 @@ func (gc *generateCmd) loadAPIModel(cmd *cobra.Command, args []string) error { } if gc.caCertificatePath != "" { if caCertificateBytes, err = ioutil.ReadFile(gc.caCertificatePath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA certificate file: %s", err.Error())) + return errors.Wrap(err, "failed to read CA certificate file") } if caKeyBytes, err = ioutil.ReadFile(gc.caPrivateKeyPath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA private key file: %s", err.Error())) + return errors.Wrap(err, "failed to read CA private key file") } prop := gc.containerService.Properties diff --git a/cmd/root.go b/cmd/root.go index e1d6af7f09..a95c6578f8 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,11 +1,11 @@ package cmd import ( - "fmt" "os" "github.com/Azure/acs-engine/pkg/armhelpers" "github.com/Azure/go-autorest/autorest/azure" + "github.com/pkg/errors" uuid "github.com/satori/go.uuid" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -49,12 +49,12 @@ func NewRootCmd() *cobra.Command { var completionCmd = &cobra.Command{ Use: "completion", Short: "Generates bash completion scripts", - Long: `To load completion run - + Long: `To load completion run + source <(acs-engine completion) - + To configure your bash shell to load completions for each session, add this to your bashrc - + # ~/.bashrc or ~/.profile source <(acs-engine completion) `, @@ -98,22 +98,22 @@ func (authArgs *authArgs) validateAuthArgs() error { if authArgs.AuthMethod == "client_secret" { if authArgs.ClientID.String() == "00000000-0000-0000-0000-000000000000" || authArgs.ClientSecret == "" { - return fmt.Errorf(`--client-id and --client-secret must be specified when --auth-method="client_secret"`) + return errors.New(`--client-id and --client-secret must be specified when --auth-method="client_secret"`) } // try parse the UUID } else if authArgs.AuthMethod == "client_certificate" { if authArgs.ClientID.String() == "00000000-0000-0000-0000-000000000000" || authArgs.CertificatePath == "" || authArgs.PrivateKeyPath == "" { - return fmt.Errorf(`--client-id and --certificate-path, and --private-key-path must be 
specified when --auth-method="client_certificate"`) + return errors.New(`--client-id and --certificate-path, and --private-key-path must be specified when --auth-method="client_certificate"`) } } if authArgs.SubscriptionID.String() == "00000000-0000-0000-0000-000000000000" { - return fmt.Errorf("--subscription-id is required (and must be a valid UUID)") + return errors.New("--subscription-id is required (and must be a valid UUID)") } _, err := azure.EnvironmentFromName(authArgs.RawAzureEnvironment) if err != nil { - return fmt.Errorf("failed to parse --azure-env as a valid target Azure cloud environment") + return errors.New("failed to parse --azure-env as a valid target Azure cloud environment") } return nil } @@ -132,7 +132,7 @@ func (authArgs *authArgs) getClient() (*armhelpers.AzureClient, error) { case "client_certificate": client, err = armhelpers.NewAzureClientWithClientCertificateFile(env, authArgs.SubscriptionID.String(), authArgs.ClientID.String(), authArgs.CertificatePath, authArgs.PrivateKeyPath) default: - return nil, fmt.Errorf("--auth-method: ERROR: method unsupported. method=%q", authArgs.AuthMethod) + return nil, errors.Errorf("--auth-method: ERROR: method unsupported. method=%q", authArgs.AuthMethod) } if err != nil { return nil, err diff --git a/cmd/scale.go b/cmd/scale.go index df9d39999e..63c69bb043 100644 --- a/cmd/scale.go +++ b/cmd/scale.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/acs-engine/pkg/openshift/filesystem" "github.com/Azure/acs-engine/pkg/operations" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -90,29 +91,29 @@ func (sc *scaleCmd) validate(cmd *cobra.Command) error { sc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf("error loading translation files: %s", err.Error()) + return errors.Wrap(err, "error loading translation files") } if sc.resourceGroupName == "" { cmd.Usage() - return fmt.Errorf("--resource-group must be specified") + return errors.New("--resource-group must be specified") } if sc.location == "" { cmd.Usage() - return fmt.Errorf("--location must be specified") + return errors.New("--location must be specified") } sc.location = helpers.NormalizeAzureRegion(sc.location) if sc.newDesiredAgentCount == 0 { cmd.Usage() - return fmt.Errorf("--new-node-count must be specified") + return errors.New("--new-node-count must be specified") } if sc.deploymentDirectory == "" { cmd.Usage() - return fmt.Errorf("--deployment-dir must be specified") + return errors.New("--deployment-dir must be specified") } return nil @@ -127,7 +128,7 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { } if sc.client, err = sc.authArgs.getClient(); err != nil { - return fmt.Errorf("failed to get client: %s", err.Error()) + return errors.Wrap(err, "failed to get client") } _, err = sc.client.EnsureResourceGroup(sc.resourceGroupName, sc.location, nil) @@ -139,7 +140,7 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { sc.apiModelPath = path.Join(sc.deploymentDirectory, "apimodel.json") if _, err = os.Stat(sc.apiModelPath); os.IsNotExist(err) { - return fmt.Errorf("specified api model does not exist (%s)", sc.apiModelPath) + return errors.Errorf("specified api model does not exist (%s)", sc.apiModelPath) } apiloader := &api.Apiloader{ @@ -149,25 +150,25 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { } sc.containerService, sc.apiVersion, err = apiloader.LoadContainerServiceFromFile(sc.apiModelPath, true, 
true, nil) if err != nil { - return fmt.Errorf("error parsing the api model: %s", err.Error()) + return errors.Wrap(err, "error parsing the api model") } if sc.containerService.Location == "" { sc.containerService.Location = sc.location } else if sc.containerService.Location != sc.location { - return fmt.Errorf("--location does not match api model location") + return errors.New("--location does not match api model location") } if sc.agentPoolToScale == "" { agentPoolCount := len(sc.containerService.Properties.AgentPoolProfiles) if agentPoolCount > 1 { - return fmt.Errorf("--node-pool is required if more than one agent pool is defined in the container service") + return errors.New("--node-pool is required if more than one agent pool is defined in the container service") } else if agentPoolCount == 1 { sc.agentPool = sc.containerService.Properties.AgentPoolProfiles[0] sc.agentPoolIndex = 0 sc.agentPoolToScale = sc.containerService.Properties.AgentPoolProfiles[0].Name } else { - return fmt.Errorf("No node pools found to scale") + return errors.New("No node pools found to scale") } } else { agentPoolIndex := -1 @@ -179,7 +180,7 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { } } if agentPoolIndex == -1 { - return fmt.Errorf("node pool %s was not found in the deployed api model", sc.agentPoolToScale) + return errors.Errorf("node pool %s was not found in the deployed api model", sc.agentPoolToScale) } } @@ -200,10 +201,10 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { if err := sc.validate(cmd); err != nil { - return fmt.Errorf("failed to validate scale command: %s", err.Error()) + return errors.Wrap(err, "failed to validate scale command") } if err := sc.load(cmd); err != nil { - return fmt.Errorf("failed to load existing container service: %s", err.Error()) + return errors.Wrap(err, "failed to load existing container service") } orchestratorInfo := sc.containerService.Properties.OrchestratorProfile @@ -215,9 +216,9 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { //TODO handle when there is a nextLink in the response and get more nodes vms, err := sc.client.ListVirtualMachines(sc.resourceGroupName) if err != nil { - return fmt.Errorf("failed to get vms in the resource group. 
Error: %s", err.Error()) + return errors.Wrap(err, "failed to get vms in the resource group") } else if len(*vms.Value) < 1 { - return fmt.Errorf("The provided resource group does not contain any vms") + return errors.New("The provided resource group does not contain any vms") } for _, vm := range *vms.Value { vmTags := *vm.Tags @@ -254,7 +255,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { if currentNodeCount > sc.newDesiredAgentCount { if sc.masterFQDN == "" { cmd.Usage() - return fmt.Errorf("master-FQDN is required to scale down a kubernetes cluster's agent pool") + return errors.New("master-FQDN is required to scale down a kubernetes cluster's agent pool") } vmsToDelete := make([]string, 0) @@ -266,39 +267,43 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { case api.Kubernetes: kubeConfig, err := acsengine.GenerateKubeConfig(sc.containerService.Properties, sc.location) if err != nil { - return fmt.Errorf("failed to generate kube config: %v", err) + return errors.Wrap(err, "failed to generate kube config") } err = sc.drainNodes(kubeConfig, vmsToDelete) if err != nil { - return fmt.Errorf("Got error %+v, while draining the nodes to be deleted", err) + return errors.Wrap(err, "Got error while draining the nodes to be deleted") } case api.OpenShift: bundle := bytes.NewReader(sc.containerService.Properties.OrchestratorProfile.OpenShiftConfig.ConfigBundles["master"]) fs, err := filesystem.NewTGZReader(bundle) if err != nil { - return fmt.Errorf("failed to read master bundle: %v", err) + return errors.Wrap(err, "failed to read master bundle") } kubeConfig, err := fs.ReadFile("etc/origin/master/admin.kubeconfig") if err != nil { - return fmt.Errorf("failed to read kube config: %v", err) + return errors.Wrap(err, "failed to read kube config") } err = sc.drainNodes(string(kubeConfig), vmsToDelete) if err != nil { - return fmt.Errorf("Got error %v, while draining the nodes to be deleted", err) + return errors.Wrap(err, "Got error while draining the nodes to be deleted") } } errList := operations.ScaleDownVMs(sc.client, sc.logger, sc.SubscriptionID.String(), sc.resourceGroupName, vmsToDelete...) if errList != nil { - errorMessage := "" + var err error + format := "Node '%s' failed to delete with error: '%s'" for element := errList.Front(); element != nil; element = element.Next() { vmError, ok := element.Value.(*operations.VMScalingErrorDetails) if ok { - error := fmt.Sprintf("Node '%s' failed to delete with error: '%s'", vmError.Name, vmError.Error.Error()) - errorMessage = errorMessage + error + if err == nil { + err = errors.Errorf(format, vmError.Name, vmError.Error.Error()) + } else { + err = errors.Wrapf(err, format, vmError.Name, vmError.Error.Error()) + } } } - return fmt.Errorf(errorMessage) + return err } return nil @@ -306,7 +311,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { } else { vmssList, err := sc.client.ListVirtualMachineScaleSets(sc.resourceGroupName) if err != nil { - return fmt.Errorf("failed to get vmss list in the resource group. 
Error: %s", err.Error()) + return errors.Wrap(err, "failed to get vmss list in the resource group") } for _, vmss := range *vmssList.Value { vmTags := *vmss.Tags @@ -321,6 +326,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { osPublisher := *vmss.VirtualMachineProfile.StorageProfile.ImageReference.Publisher if strings.EqualFold(osPublisher, "MicrosoftWindowsServer") { _, _, winPoolIndex, err = utils.WindowsVMSSNameParts(*vmss.Name) + log.Errorln(err) } currentNodeCount = int(*vmss.Sku.Capacity) @@ -335,18 +341,18 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { } templateGenerator, err := acsengine.InitializeTemplateGenerator(ctx, sc.classicMode) if err != nil { - return fmt.Errorf("failed to initialize template generator: %s", err.Error()) + return errors.Wrap(err, "failed to initialize template generator") } sc.containerService.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{sc.agentPool} template, parameters, _, err := templateGenerator.GenerateTemplate(sc.containerService, acsengine.DefaultGeneratorCode, false, BuildTag) if err != nil { - return fmt.Errorf("error generating template %s: %s", sc.apiModelPath, err.Error()) + return errors.Wrapf(err, "error generating template %s", sc.apiModelPath) } if template, err = transform.PrettyPrintArmTemplate(template); err != nil { - return fmt.Errorf("error pretty printing template: %s", err.Error()) + return errors.Wrap(err, "error pretty printing template") } templateJSON := make(map[string]interface{}) @@ -354,12 +360,12 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { err = json.Unmarshal([]byte(template), &templateJSON) if err != nil { - return err + return errors.Wrap(err, "error unmarshaling template") } err = json.Unmarshal([]byte(parameters), ¶metersJSON) if err != nil { - return err + return errors.Wrap(err, "errror unmarshalling parameters") } transformer := transform.Transformer{Translator: ctx.Translator} @@ -378,7 +384,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { case api.OpenShift: err = transformer.NormalizeForOpenShiftVMASScalingUp(sc.logger, sc.agentPool.Name, templateJSON) if err != nil { - return fmt.Errorf("error tranforming the template for scaling template %s: %v", sc.apiModelPath, err) + return errors.Wrapf(err, "error tranforming the template for scaling template %s", sc.apiModelPath) } if sc.agentPool.IsAvailabilitySets() { addValue(parametersJSON, fmt.Sprintf("%sOffset", sc.agentPool.Name), highestUsedIndex+1) @@ -386,7 +392,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { case api.Kubernetes: err = transformer.NormalizeForK8sVMASScalingUp(sc.logger, templateJSON) if err != nil { - return fmt.Errorf("error tranforming the template for scaling template %s: %s", sc.apiModelPath, err.Error()) + return errors.Wrapf(err, "error tranforming the template for scaling template %s", sc.apiModelPath) } if sc.agentPool.IsAvailabilitySets() { addValue(parametersJSON, fmt.Sprintf("%sOffset", sc.agentPool.Name), highestUsedIndex+1) @@ -395,7 +401,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { case api.SwarmMode: case api.DCOS: if sc.agentPool.IsAvailabilitySets() { - return fmt.Errorf("scaling isn't supported for orchestrator %s, with availability sets", orchestratorInfo.OrchestratorType) + return errors.Errorf("scaling isn't supported for orchestrator %q, with availability sets", orchestratorInfo.OrchestratorType) } transformer.NormalizeForVMSSScaling(sc.logger, templateJSON) } @@ -472,7 
+478,7 @@ func (sc *scaleCmd) drainNodes(kubeConfig string, vmsToDelete []string) error { for i := 0; i < numVmsToDrain; i++ { errDetails := <-errChan if errDetails != nil { - return fmt.Errorf("Node %q failed to drain with error: %v", errDetails.Name, errDetails.Error) + return errors.Wrapf(errDetails.Error, "Node %q failed to drain with error", errDetails.Name) } } diff --git a/cmd/upgrade.go b/cmd/upgrade.go index 36e458abca..a9218509d9 100644 --- a/cmd/upgrade.go +++ b/cmd/upgrade.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/acs-engine/pkg/i18n" "github.com/Azure/acs-engine/pkg/operations/kubernetesupgrade" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -77,17 +78,17 @@ func (uc *upgradeCmd) validate(cmd *cobra.Command) error { uc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf("error loading translation files: %s", err.Error()) + return errors.Wrap(err, "error loading translation files") } if uc.resourceGroupName == "" { cmd.Usage() - return fmt.Errorf("--resource-group must be specified") + return errors.New("--resource-group must be specified") } if uc.location == "" { cmd.Usage() - return fmt.Errorf("--location must be specified") + return errors.New("--location must be specified") } uc.location = helpers.NormalizeAzureRegion(uc.location) @@ -99,12 +100,12 @@ func (uc *upgradeCmd) validate(cmd *cobra.Command) error { // TODO(colemick): add in the cmd annotation to help enable autocompletion if uc.upgradeVersion == "" { cmd.Usage() - return fmt.Errorf("--upgrade-version must be specified") + return errors.New("--upgrade-version must be specified") } if uc.deploymentDirectory == "" { cmd.Usage() - return fmt.Errorf("--deployment-dir must be specified") + return errors.New("--deployment-dir must be specified") } return nil } @@ -113,23 +114,23 @@ func (uc *upgradeCmd) loadCluster(cmd *cobra.Command) error { var err error if err = uc.authArgs.validateAuthArgs(); err != nil { - return fmt.Errorf("%s", err.Error()) + return err } if uc.client, err = uc.authArgs.getClient(); err != nil { - return fmt.Errorf("Failed to get client: %s", err.Error()) + return errors.Wrap(err, "Failed to get client") } _, err = uc.client.EnsureResourceGroup(uc.resourceGroupName, uc.location, nil) if err != nil { - return fmt.Errorf("Error ensuring resource group: %s", err.Error()) + return errors.Wrap(err, "Error ensuring resource group") } // load apimodel from the deployment directory apiModelPath := path.Join(uc.deploymentDirectory, "apimodel.json") if _, err = os.Stat(apiModelPath); os.IsNotExist(err) { - return fmt.Errorf("specified api model does not exist (%s)", apiModelPath) + return errors.Errorf("specified api model does not exist (%s)", apiModelPath) } apiloader := &api.Apiloader{ @@ -139,19 +140,19 @@ func (uc *upgradeCmd) loadCluster(cmd *cobra.Command) error { } uc.containerService, uc.apiVersion, err = apiloader.LoadContainerServiceFromFile(apiModelPath, true, true, nil) if err != nil { - return fmt.Errorf("error parsing the api model: %s", err.Error()) + return errors.Wrap(err, "error parsing the api model") } if uc.containerService.Location == "" { uc.containerService.Location = uc.location } else if uc.containerService.Location != uc.location { - return fmt.Errorf("--location does not match api model location") + return errors.New("--location does not match api model location") } // get available upgrades for container service 
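The changes in this patch follow a consistent mechanical pattern: fmt.Errorf("...: %s", err) becomes errors.Wrap(err, "..."), constant messages become errors.New, and formatted messages with no underlying error become errors.Errorf. A minimal sketch of what wrapping buys over string concatenation follows; readAPIModel is a hypothetical helper (not a function from this repository), used only to produce an error worth wrapping.

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// readAPIModel is a hypothetical helper used only to produce an error to wrap.
func readAPIModel(path string) error {
	if _, err := os.Stat(path); err != nil {
		// errors.Wrap keeps the original error (and records a stack trace)
		// behind the new message, instead of flattening it into a string
		// the way fmt.Errorf("error parsing the api model: %s", err) does.
		return errors.Wrap(err, "error parsing the api model")
	}
	return nil
}

func main() {
	err := readAPIModel("/does/not/exist/apimodel.json")

	fmt.Printf("%v\n", err)  // message chain: "error parsing the api model: stat ...: no such file or directory"
	fmt.Printf("%+v\n", err) // same messages plus the stack trace recorded by Wrap

	// The original error remains reachable, so callers can still inspect it.
	fmt.Println(os.IsNotExist(errors.Cause(err))) // true
}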
orchestratorInfo, err := api.GetOrchestratorVersionProfile(uc.containerService.Properties.OrchestratorProfile) if err != nil { - return fmt.Errorf("error getting list of available upgrades: %s", err.Error()) + return errors.Wrap(err, "error getting list of available upgrades") } // add the current version if upgrade has failed orchestratorInfo.Upgrades = append(orchestratorInfo.Upgrades, &api.OrchestratorProfile{ @@ -168,7 +169,7 @@ func (uc *upgradeCmd) loadCluster(cmd *cobra.Command) error { } } if !found { - return fmt.Errorf("version %s is not supported", uc.upgradeVersion) + return errors.Errorf("version %s is not supported", uc.upgradeVersion) } // Read name suffix to identify nodes in the resource group that belong diff --git a/glide.lock b/glide.lock index ffff64a952..77f9fc7b84 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 90d55cc9eec600a57a5c52688b814b2daca46d5b4546fcd8a98b1d64d91422bd -updated: 2018-06-29T15:54:03.527724072-07:00 +hash: f9bc1ee5fe4bbff90c78d6851f3d91b617a0166b92137c3913d879c3cdb65876 +updated: 2018-07-02T13:50:41.754767835-07:00 imports: - name: github.com/alexcesaro/statsd version: 7fea3f0d2fab1ad973e641e51dba45443a311a90 @@ -129,6 +129,8 @@ imports: - matchers/support/goraph/node - matchers/support/goraph/util - types +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/rjtsdl/conform version: 7b97338c67692998bc62b2572e96c8ff09803dc3 - name: github.com/satori/go.uuid diff --git a/glide.yaml b/glide.yaml index 74031e5f65..1186d0cca2 100644 --- a/glide.yaml +++ b/glide.yaml @@ -68,6 +68,8 @@ import: - package: golang.org/x/sync subpackages: - errgroup +- package: github.com/pkg/errors + version: ~0.8.0 testImport: - package: github.com/onsi/gomega - package: github.com/onsi/ginkgo diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index af5795c4de..d264e92e4c 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/acs-engine/pkg/helpers" "github.com/Azure/acs-engine/pkg/openshift/certgen" "github.com/blang/semver" + "github.com/pkg/errors" ) const ( @@ -883,7 +884,7 @@ func setDefaultCerts(a *api.Properties) (bool, error) { firstMasterIP := net.ParseIP(a.MasterProfile.FirstConsecutiveStaticIP).To4() if firstMasterIP == nil { - return false, fmt.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", a.MasterProfile.FirstConsecutiveStaticIP) + return false, errors.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", a.MasterProfile.FirstConsecutiveStaticIP) } ips := []net.IP{firstMasterIP} diff --git a/pkg/acsengine/engine.go b/pkg/acsengine/engine.go index 5edddb8268..f7391c7158 100644 --- a/pkg/acsengine/engine.go +++ b/pkg/acsengine/engine.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/common" "github.com/Azure/acs-engine/pkg/helpers" "github.com/ghodss/yaml" + "github.com/pkg/errors" ) var commonTemplateFiles = []string{agentOutputs, agentParams, classicParams, masterOutputs, iaasOutputs, masterParams, windowsParams} @@ -65,14 +66,14 @@ func GenerateClusterID(properties *api.Properties) string { // GenerateKubeConfig returns a JSON string representing the KubeConfig func GenerateKubeConfig(properties *api.Properties, location string) (string, error) { if properties == nil { - return "", fmt.Errorf("Properties nil in 
GenerateKubeConfig") + return "", errors.New("Properties nil in GenerateKubeConfig") } if properties.CertificateProfile == nil { - return "", fmt.Errorf("CertificateProfile property may not be nil in GenerateKubeConfig") + return "", errors.New("CertificateProfile property may not be nil in GenerateKubeConfig") } b, err := Asset(kubeConfigJSON) if err != nil { - return "", fmt.Errorf("error reading kube config template file %s: %s", kubeConfigJSON, err.Error()) + return "", errors.Wrapf(err, "error reading kube config template file %s", kubeConfigJSON) } kubeconfig := string(b) // variable replacement @@ -85,7 +86,7 @@ func GenerateKubeConfig(properties *api.Properties, location string) (string, er // more than 1 master, use the internal lb IP firstMasterIP := net.ParseIP(properties.MasterProfile.FirstConsecutiveStaticIP).To4() if firstMasterIP == nil { - return "", fmt.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", properties.MasterProfile.FirstConsecutiveStaticIP) + return "", errors.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", properties.MasterProfile.FirstConsecutiveStaticIP) } lbIP := net.IP{firstMasterIP[0], firstMasterIP[1], firstMasterIP[2], firstMasterIP[3] + byte(DefaultInternalLbStaticIPOffset)} kubeconfig = strings.Replace(kubeconfig, "{{WrapAsVerbatim \"reference(concat('Microsoft.Network/publicIPAddresses/', variables('masterPublicIPAddressName'))).dnsSettings.fqdn\"}}", lbIP.String(), -1) @@ -271,7 +272,7 @@ func addSecret(m paramsMap, k string, v interface{}, encode bool) { func getStorageAccountType(sizeName string) (string, error) { spl := strings.Split(sizeName, "_") if len(spl) < 2 { - return "", fmt.Errorf("Invalid sizeName: %s", sizeName) + return "", errors.Errorf("Invalid sizeName: %s", sizeName) } capability := spl[1] if strings.Contains(strings.ToLower(capability), "s") { @@ -1201,7 +1202,7 @@ func validateProfileOptedForExtension(extensionName string, profileExtensions [] func getLinkedTemplateTextForURL(rootURL, orchestrator, extensionName, version, query string) (string, error) { supportsExtension, err := orchestratorSupportsExtension(rootURL, orchestrator, extensionName, version, query) if !supportsExtension { - return "", fmt.Errorf("Extension not supported for orchestrator. 
Error: %s", err) + return "", errors.Wrap(err, "Extension not supported for orchestrator") } templateLinkBytes, err := getExtensionResource(rootURL, extensionName, version, "template-link.json", query) @@ -1221,11 +1222,11 @@ func orchestratorSupportsExtension(rootURL, orchestrator, extensionName, version var supportedOrchestrators []string err = json.Unmarshal(orchestratorBytes, &supportedOrchestrators) if err != nil { - return false, fmt.Errorf("Unable to parse supported-orchestrators.json for Extension %s Version %s", extensionName, version) + return false, errors.Errorf("Unable to parse supported-orchestrators.json for Extension %s Version %s", extensionName, version) } if !stringInSlice(orchestrator, supportedOrchestrators) { - return false, fmt.Errorf("Orchestrator: %s not in list of supported orchestrators for Extension: %s Version %s", orchestrator, extensionName, version) + return false, errors.Errorf("Orchestrator: %s not in list of supported orchestrators for Extension: %s Version %s", orchestrator, extensionName, version) } return true, nil @@ -1236,18 +1237,18 @@ func getExtensionResource(rootURL, extensionName, version, fileName, query strin res, err := http.Get(requestURL) if err != nil { - return nil, fmt.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s Error: %s", extensionName, version, fileName, requestURL, err) + return nil, errors.Wrapf(err, "Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s", extensionName, version, fileName, requestURL) } defer res.Body.Close() if res.StatusCode != 200 { - return nil, fmt.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s StatusCode: %s: Status: %s", extensionName, version, fileName, requestURL, strconv.Itoa(res.StatusCode), res.Status) + return nil, errors.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s StatusCode: %s: Status: %s", extensionName, version, fileName, requestURL, strconv.Itoa(res.StatusCode), res.Status) } body, err := ioutil.ReadAll(res.Body) if err != nil { - return nil, fmt.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s Error: %s", extensionName, version, fileName, requestURL, err) + return nil, errors.Wrapf(err, "Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s", extensionName, version, fileName, requestURL) } return body, nil diff --git a/pkg/acsengine/template_generator.go b/pkg/acsengine/template_generator.go index aefc02f023..d46923ae50 100644 --- a/pkg/acsengine/template_generator.go +++ b/pkg/acsengine/template_generator.go @@ -3,7 +3,6 @@ package acsengine import ( "bytes" "encoding/base64" - "errors" "fmt" "runtime/debug" "sort" @@ -15,6 +14,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/common" "github.com/Azure/acs-engine/pkg/helpers" "github.com/Azure/acs-engine/pkg/i18n" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -80,7 +80,7 @@ func (t *TemplateGenerator) GenerateTemplate(containerService *api.ContainerServ defer func() { if r := recover(); r != nil { s := debug.Stack() - err = fmt.Errorf("%v - %s", r, s) + err = errors.Errorf("%v - %s", r, s) // invalidate the template and the parameters templateRaw = "" @@ -89,7 +89,7 @@ func (t *TemplateGenerator) GenerateTemplate(containerService *api.ContainerServ }() if 
!validateDistro(containerService) { - return templateRaw, parametersRaw, certsGenerated, fmt.Errorf("Invalid distro") + return templateRaw, parametersRaw, certsGenerated, errors.New("Invalid distro") } var b bytes.Buffer diff --git a/pkg/acsengine/tenantid.go b/pkg/acsengine/tenantid.go index 80fc1a64c2..15e0e26939 100644 --- a/pkg/acsengine/tenantid.go +++ b/pkg/acsengine/tenantid.go @@ -1,12 +1,12 @@ package acsengine import ( - "fmt" "net/http" "regexp" "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" "github.com/Azure/go-autorest/autorest/azure" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -25,19 +25,16 @@ func GetTenantID(env azure.Environment, subscriptionID string) (string, error) { // network error etc) subs, err := c.Get(subscriptionID) if subs.Response.Response == nil { - log.Errorf("Request failed: %v", err) - return "", fmt.Errorf("Request failed: %v", err) + return "", errors.Wrap(err, "Request failed") } // Expecting 401 StatusUnauthorized here, just read the header if subs.StatusCode != http.StatusUnauthorized { - log.Errorf("Unexpected response from Get Subscription: %v", subs.StatusCode) - return "", fmt.Errorf("Unexpected response from Get Subscription: %v", subs.StatusCode) + return "", errors.Errorf("Unexpected response from Get Subscription: %v", subs.StatusCode) } hdr := subs.Header.Get(hdrKey) if hdr == "" { - log.Errorf("Header %v not found in Get Subscription response", hdrKey) - return "", fmt.Errorf("Header %v not found in Get Subscription response", hdrKey) + return "", errors.Errorf("Header %v not found in Get Subscription response", hdrKey) } // Example value for hdr: @@ -45,8 +42,7 @@ func GetTenantID(env azure.Environment, subscriptionID string) (string, error) { r := regexp.MustCompile(`authorization_uri=".*/([0-9a-f\-]+)"`) m := r.FindStringSubmatch(hdr) if m == nil { - log.Errorf("Could not find the tenant ID in header: %s %q", hdrKey, hdr) - return "", fmt.Errorf("Could not find the tenant ID in header: %s %q", hdrKey, hdr) + return "", errors.Errorf("Could not find the tenant ID in header: %s %q", hdrKey, hdr) } return m[1], nil } diff --git a/pkg/api/apiloader.go b/pkg/api/apiloader.go index 9745b22267..8706492391 100644 --- a/pkg/api/apiloader.go +++ b/pkg/api/apiloader.go @@ -6,8 +6,6 @@ import ( "io/ioutil" "reflect" - "fmt" - "github.com/Azure/acs-engine/pkg/api/agentPoolOnlyApi/v20170831" "github.com/Azure/acs-engine/pkg/api/agentPoolOnlyApi/v20180331" apvlabs "github.com/Azure/acs-engine/pkg/api/agentPoolOnlyApi/vlabs" @@ -19,6 +17,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/vlabs" "github.com/Azure/acs-engine/pkg/helpers" "github.com/Azure/acs-engine/pkg/i18n" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -85,7 +84,7 @@ func (a *Apiloader) LoadContainerService( } setContainerServiceDefaultsv20160930(containerService) if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(); validate && e != nil { return nil, e @@ -108,7 +107,7 @@ func (a *Apiloader) LoadContainerService( } setContainerServiceDefaultsv20160330(containerService) if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := 
containerService.Properties.Validate(); validate && e != nil { return nil, e @@ -132,7 +131,7 @@ func (a *Apiloader) LoadContainerService( } setContainerServiceDefaultsv20170131(containerService) if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(); validate && e != nil { return nil, e @@ -155,7 +154,7 @@ func (a *Apiloader) LoadContainerService( } } if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(isUpdate); validate && e != nil { return nil, e @@ -183,7 +182,7 @@ func (a *Apiloader) LoadContainerService( } } if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(isUpdate); validate && e != nil { return nil, e diff --git a/pkg/api/common/helper.go b/pkg/api/common/helper.go index 0c1182a75c..924d76e71c 100644 --- a/pkg/api/common/helper.go +++ b/pkg/api/common/helper.go @@ -1,10 +1,10 @@ package common import ( - "fmt" "regexp" "strings" + "github.com/pkg/errors" validator "gopkg.in/go-playground/validator.v9" ) @@ -19,38 +19,38 @@ func HandleValidationErrors(e validator.ValidationErrors) error { "Properties.LinuxProfile", "Properties.ServicePrincipalProfile.ClientID", "Properties.WindowsProfile.AdminUsername", "Properties.WindowsProfile.AdminPassword": - return fmt.Errorf("missing %s", ns) + return errors.Errorf("missing %s", ns) case "Properties.MasterProfile.Count": - return fmt.Errorf("MasterProfile count needs to be 1, 3, or 5") + return errors.New("MasterProfile count needs to be 1, 3, or 5") case "Properties.MasterProfile.OSDiskSizeGB": - return fmt.Errorf("Invalid os disk size of %d specified. The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) + return errors.Errorf("Invalid os disk size of %d specified. The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) case "Properties.MasterProfile.IPAddressCount": - return fmt.Errorf("MasterProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) + return errors.Errorf("MasterProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) case "Properties.MasterProfile.StorageProfile": - return fmt.Errorf("Unknown storageProfile '%s'. Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) + return errors.Errorf("Unknown storageProfile '%s'. Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) default: if strings.HasPrefix(ns, "Properties.AgentPoolProfiles") { switch { case strings.HasSuffix(ns, ".Name") || strings.HasSuffix(ns, "VMSize"): - return fmt.Errorf("missing %s", ns) + return errors.Errorf("missing %s", ns) case strings.HasSuffix(ns, ".Count"): - return fmt.Errorf("AgentPoolProfile count needs to be in the range [%d,%d]", MinAgentCount, MaxAgentCount) + return errors.Errorf("AgentPoolProfile count needs to be in the range [%d,%d]", MinAgentCount, MaxAgentCount) case strings.HasSuffix(ns, ".OSDiskSizeGB"): - return fmt.Errorf("Invalid os disk size of %d specified. 
The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) + return errors.Errorf("Invalid os disk size of %d specified. The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) case strings.Contains(ns, ".Ports"): - return fmt.Errorf("AgentPoolProfile Ports must be in the range[%d, %d]", MinPort, MaxPort) + return errors.Errorf("AgentPoolProfile Ports must be in the range[%d, %d]", MinPort, MaxPort) case strings.HasSuffix(ns, ".StorageProfile"): - return fmt.Errorf("Unknown storageProfile '%s'. Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) + return errors.Errorf("Unknown storageProfile '%s'. Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) case strings.Contains(ns, ".DiskSizesGB"): - return fmt.Errorf("A maximum of %d disks may be specified, The range of valid disk size values are [%d, %d]", MaxDisks, MinDiskSizeGB, MaxDiskSizeGB) + return errors.Errorf("A maximum of %d disks may be specified, The range of valid disk size values are [%d, %d]", MaxDisks, MinDiskSizeGB, MaxDiskSizeGB) case strings.HasSuffix(ns, ".IPAddressCount"): - return fmt.Errorf("AgentPoolProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) + return errors.Errorf("AgentPoolProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) default: break } } } - return fmt.Errorf("Namespace %s is not caught, %+v", ns, e) + return errors.Errorf("Namespace %s is not caught, %+v", ns, e) } // ValidateDNSPrefix is a helper function to check that a DNS Prefix is valid @@ -61,7 +61,7 @@ func ValidateDNSPrefix(dnsName string) error { return err } if !re.MatchString(dnsName) { - return fmt.Errorf("DNSPrefix '%s' is invalid. The DNSPrefix must contain between 3 and 45 characters and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. (length was %d)", dnsName, len(dnsName)) + return errors.Errorf("DNSPrefix '%s' is invalid. The DNSPrefix must contain between 3 and 45 characters and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. (length was %d)", dnsName, len(dnsName)) } return nil } diff --git a/pkg/api/common/net.go b/pkg/api/common/net.go index 728d4b5dde..0766c9bb67 100644 --- a/pkg/api/common/net.go +++ b/pkg/api/common/net.go @@ -1,9 +1,10 @@ package common import ( - "fmt" "net" "regexp" + + "github.com/pkg/errors" ) // CidrFirstIP returns the first IP of the provided subnet. @@ -51,7 +52,7 @@ func GetVNETSubnetIDComponents(vnetSubnetID string) (string, string, string, str } submatches := re.FindStringSubmatch(vnetSubnetID) if len(submatches) != 5 { - return "", "", "", "", fmt.Errorf("Unable to parse vnetSubnetID. Please use a vnetSubnetID with format /subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME") + return "", "", "", "", errors.New("Unable to parse vnetSubnetID. 
Please use a vnetSubnetID with format /subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME") } return submatches[1], submatches[2], submatches[3], submatches[4], nil } diff --git a/pkg/api/orchestrators.go b/pkg/api/orchestrators.go index 8666a8d340..de54e39e8a 100644 --- a/pkg/api/orchestrators.go +++ b/pkg/api/orchestrators.go @@ -1,7 +1,6 @@ package api import ( - "fmt" "strconv" "strings" @@ -9,6 +8,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/v20170930" "github.com/Azure/acs-engine/pkg/api/vlabs" "github.com/blang/semver" + "github.com/pkg/errors" ) type orchestratorsFunc func(*OrchestratorProfile) ([]*OrchestratorVersionProfile, error) @@ -47,10 +47,10 @@ func validate(orchestrator, version string) (string, error) { return OpenShift, nil case orchestrator == "": if version != "" { - return "", fmt.Errorf("Must specify orchestrator for version '%s'", version) + return "", errors.Errorf("Must specify orchestrator for version '%s'", version) } default: - return "", fmt.Errorf("Unsupported orchestrator '%s'", orchestrator) + return "", errors.Errorf("Unsupported orchestrator '%s'", orchestrator) } return "", nil } @@ -120,7 +120,7 @@ func getOrchestratorVersionProfileList(orchestrator, version string) ([]*Orchest // GetOrchestratorVersionProfile returns orchestrator info for upgradable container service func GetOrchestratorVersionProfile(orch *OrchestratorProfile) (*OrchestratorVersionProfile, error) { if orch.OrchestratorVersion == "" { - return nil, fmt.Errorf("Missing Orchestrator Version") + return nil, errors.New("Missing Orchestrator Version") } switch orch.OrchestratorType { case Kubernetes, DCOS: @@ -130,11 +130,11 @@ func GetOrchestratorVersionProfile(orch *OrchestratorProfile) (*OrchestratorVers } // has to be exactly one element per specified orchestrator/version if len(arr) != 1 { - return nil, fmt.Errorf("Umbiguous Orchestrator Versions") + return nil, errors.New("Umbiguous Orchestrator Versions") } return arr[0], nil default: - return nil, fmt.Errorf("Upgrade operation is not supported for '%s'", orch.OrchestratorType) + return nil, errors.Errorf("Upgrade operation is not supported for '%s'", orch.OrchestratorType) } } @@ -159,7 +159,7 @@ func kubernetesInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, } } else { if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("Kubernetes version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("Kubernetes version %s is not supported", csOrch.OrchestratorVersion) } upgrades, err := kubernetesUpgrades(csOrch) @@ -218,7 +218,7 @@ func dcosInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, error } } else { if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("DCOS version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("DCOS version %s is not supported", csOrch.OrchestratorVersion) } // get info for the specified version @@ -265,7 +265,7 @@ func swarmInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, erro } if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("Swarm version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("Swarm version %s is not supported", csOrch.OrchestratorVersion) } return []*OrchestratorVersionProfile{ { @@ -291,7 +291,7 @@ func dockerceInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, e } if !isVersionSupported(csOrch) { - return nil, 
fmt.Errorf("Docker CE version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("Docker CE version %s is not supported", csOrch.OrchestratorVersion) } return []*OrchestratorVersionProfile{ { @@ -323,7 +323,7 @@ func openShiftInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, } } else { if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("OpenShift version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("OpenShift version %s is not supported", csOrch.OrchestratorVersion) } // TODO: populate OrchestratorVersionProfile.Upgrades diff --git a/pkg/api/strictjson.go b/pkg/api/strictjson.go index ee00396045..a88addcb37 100644 --- a/pkg/api/strictjson.go +++ b/pkg/api/strictjson.go @@ -2,9 +2,10 @@ package api import ( "encoding/json" - "fmt" "reflect" "strings" + + "github.com/pkg/errors" ) func checkJSONKeys(data []byte, types ...reflect.Type) error { @@ -21,7 +22,7 @@ func checkMapKeys(o map[string]interface{}, types ...reflect.Type) error { for k, v := range o { f, present := fieldMap[strings.ToLower(k)] if !present { - return fmt.Errorf("Unknown JSON tag %s", k) + return errors.Errorf("Unknown JSON tag %s", k) } if f.Type.Kind() == reflect.Struct && v != nil { if childMap, exists := v.(map[string]interface{}); exists { diff --git a/pkg/armhelpers/azureclient.go b/pkg/armhelpers/azureclient.go index 0a48e08c89..275ea1b222 100644 --- a/pkg/armhelpers/azureclient.go +++ b/pkg/armhelpers/azureclient.go @@ -23,6 +23,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/to" "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/Azure/acs-engine/pkg/acsengine" @@ -75,7 +76,7 @@ func NewAzureClientWithDeviceAuth(env azure.Environment, subscriptionID string) home, err := homedir.Dir() if err != nil { - return nil, fmt.Errorf("Failed to get user home directory to look for cached token: %q", err) + return nil, errors.Wrap(err, "Failed to get user home directory to look for cached token") } cachePath := filepath.Join(home, ApplicationDir, "cache", fmt.Sprintf("%s_%s.token.json", tenantID, acsEngineClientID)) @@ -157,22 +158,22 @@ func NewAzureClientWithClientSecret(env azure.Environment, subscriptionID, clien func NewAzureClientWithClientCertificateFile(env azure.Environment, subscriptionID, clientID, certificatePath, privateKeyPath string) (*AzureClient, error) { certificateData, err := ioutil.ReadFile(certificatePath) if err != nil { - return nil, fmt.Errorf("Failed to read certificate: %q", err) + return nil, errors.Wrap(err, "Failed to read certificate") } block, _ := pem.Decode(certificateData) if block == nil { - return nil, fmt.Errorf("Failed to decode pem block from certificate") + return nil, errors.New("Failed to decode pem block from certificate") } certificate, err := x509.ParseCertificate(block.Bytes) if err != nil { - return nil, fmt.Errorf("Failed to parse certificate: %q", err) + return nil, errors.Wrap(err, "Failed to parse certificate") } privateKey, err := parseRsaPrivateKey(privateKeyPath) if err != nil { - return nil, fmt.Errorf("Failed to parse rsa private key: %q", err) + return nil, errors.Wrap(err, "Failed to parse rsa private key") } return NewAzureClientWithClientCertificate(env, subscriptionID, clientID, certificate, privateKey) @@ -186,11 +187,11 @@ func NewAzureClientWithClientCertificate(env azure.Environment, subscriptionID, } if certificate == nil 
{ - return nil, fmt.Errorf("certificate should not be nil") + return nil, errors.New("certificate should not be nil") } if privateKey == nil { - return nil, fmt.Errorf("privateKey should not be nil") + return nil, errors.New("privateKey should not be nil") } armSpt, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, clientID, certificate, privateKey, env.ServiceManagementEndpoint) @@ -231,7 +232,7 @@ func tryLoadCachedToken(cachePath string) (*adal.Token, error) { token, err := adal.LoadToken(cachePath) if err != nil { - return nil, fmt.Errorf("Failed to load token from file: %v", err) + return nil, errors.Wrap(err, "Failed to load token from file") } return token, nil @@ -313,7 +314,7 @@ func (az *AzureClient) EnsureProvidersRegistered(subscriptionID string) error { return err } if registeredProviders.Value == nil { - return fmt.Errorf("Providers list was nil. subscription=%q", subscriptionID) + return errors.Errorf("Providers list was nil. subscription=%q", subscriptionID) } m := make(map[string]bool) @@ -324,7 +325,7 @@ func (az *AzureClient) EnsureProvidersRegistered(subscriptionID string) error { for _, provider := range RequiredResourceProviders { registered, ok := m[strings.ToLower(provider)] if !ok { - return fmt.Errorf("Unknown resource provider %q", provider) + return errors.Errorf("Unknown resource provider %q", provider) } if registered { log.Debugf("Already registered for %q", provider) @@ -346,7 +347,7 @@ func parseRsaPrivateKey(path string) (*rsa.PrivateKey, error) { block, _ := pem.Decode(privateKeyData) if block == nil { - return nil, fmt.Errorf("Failed to decode a pem block from private key") + return nil, errors.New("Failed to decode a pem block from private key") } privatePkcs1Key, errPkcs1 := x509.ParsePKCS1PrivateKey(block.Bytes) @@ -358,12 +359,12 @@ func parseRsaPrivateKey(path string) (*rsa.PrivateKey, error) { if errPkcs8 == nil { privatePkcs8RsaKey, ok := privatePkcs8Key.(*rsa.PrivateKey) if !ok { - return nil, fmt.Errorf("pkcs8 contained non-RSA key. Expected RSA key") + return nil, errors.New("pkcs8 contained non-RSA key. Expected RSA key") } return privatePkcs8RsaKey, nil } - return nil, fmt.Errorf("failed to parse private key as Pkcs#1 or Pkcs#8. (%s). (%s)", errPkcs1, errPkcs8) + return nil, errors.Errorf("failed to parse private key as Pkcs#1 or Pkcs#8. (%s). 
(%s)", errPkcs1, errPkcs8) } //AddAcceptLanguages sets the list of languages to accept on this request diff --git a/pkg/armhelpers/utils/util.go b/pkg/armhelpers/utils/util.go index 348cdb586f..3271307a7f 100644 --- a/pkg/armhelpers/utils/util.go +++ b/pkg/armhelpers/utils/util.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -53,7 +54,7 @@ func ResourceName(ID string) (string, error) { parts := strings.Split(ID, "/") name := parts[len(parts)-1] if len(name) == 0 { - return "", fmt.Errorf("resource name was missing from identifier") + return "", errors.Errorf("resource name was missing from identifier") } return name, nil @@ -79,13 +80,13 @@ func SplitBlobURI(URI string) (string, string, string, error) { func K8sLinuxVMNameParts(vmName string) (poolIdentifier, nameSuffix string, agentIndex int, err error) { vmNameParts := vmnameLinuxRegexp.FindStringSubmatch(vmName) if len(vmNameParts) != 4 { - return "", "", -1, fmt.Errorf("resource name was missing from identifier") + return "", "", -1, errors.Errorf("resource name was missing from identifier") } vmNum, err := strconv.Atoi(vmNameParts[k8sLinuxVMAgentIndexArrayIndex]) if err != nil { - return "", "", -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, errors.Wrap(err, "Error parsing VM Name") } return vmNameParts[k8sLinuxVMAgentPoolNameIndex], vmNameParts[k8sLinuxVMAgentClusterIDIndex], vmNum, nil @@ -95,7 +96,7 @@ func K8sLinuxVMNameParts(vmName string) (poolIdentifier, nameSuffix string, agen func VmssNameParts(vmssName string) (poolIdentifier, nameSuffix string, err error) { vmssNameParts := vmssnameRegexp.FindStringSubmatch(vmssName) if len(vmssNameParts) != 3 { - return "", "", fmt.Errorf("resource name was missing from identifier") + return "", "", errors.New("resource name was missing from identifier") } return vmssNameParts[vmssAgentPoolNameIndex], vmssNameParts[vmssClusterIDIndex], nil @@ -105,7 +106,7 @@ func VmssNameParts(vmssName string) (poolIdentifier, nameSuffix string, err erro func WindowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIndex int, agentIndex int, err error) { vmNameParts := vmnameWindowsRegexp.FindStringSubmatch(vmName) if len(vmNameParts) != 4 { - return "", "", -1, -1, fmt.Errorf("resource name was missing from identifier") + return "", "", -1, -1, errors.New("resource name was missing from identifier") } poolPrefix = vmNameParts[k8sWindowsVMAgentPoolPrefixIndex] @@ -114,7 +115,7 @@ func WindowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIn poolIndex, err = strconv.Atoi(poolInfo[:3]) if err != nil { - return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, -1, errors.Wrap(err, "Error parsing VM Name") } poolIndex -= 900 agentIndex, _ = strconv.Atoi(poolInfo[3:]) @@ -127,7 +128,7 @@ func WindowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIn func WindowsVMSSNameParts(vmssName string) (poolPrefix string, acsStr string, poolIndex int, err error) { vmssNameParts := vmssnameWindowsRegexp.FindStringSubmatch(vmssName) if len(vmssNameParts) != 4 { - return "", "", -1, fmt.Errorf("resource name was missing from identifier") + return "", "", -1, errors.Errorf("resource name was missing from identifier") } poolPrefix = vmssNameParts[windowsVmssAgentPoolNameIndex] @@ -136,7 +137,7 @@ func WindowsVMSSNameParts(vmssName string) (poolPrefix string, 
acsStr string, po poolIndex, err = strconv.Atoi(poolInfo) if err != nil { - return "", "", -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, errors.Wrap(err, "Error parsing VM Name") } poolIndex -= 900 @@ -176,5 +177,5 @@ func GetK8sVMName(osType api.OSType, isAKS bool, nameSuffix, agentPoolName strin if osType == api.Windows { return fmt.Sprintf("%s%s%d%d", nameSuffix[:5], prefix, 900+agentPoolIndex, agentIndex), nil } - return "", fmt.Errorf("Failed to reconstruct VM Name") + return "", errors.Errorf("Failed to reconstruct VM Name") } diff --git a/pkg/operations/cordondrainvm.go b/pkg/operations/cordondrainvm.go index 4a58db9b32..d37116003d 100644 --- a/pkg/operations/cordondrainvm.go +++ b/pkg/operations/cordondrainvm.go @@ -1,11 +1,11 @@ package operations import ( - "fmt" "strings" "time" "github.com/Azure/acs-engine/pkg/armhelpers" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -177,7 +177,7 @@ func (o *drainOperation) evictPods(pods []v1.Pod, policyGroupVersion string) err } else if apierrors.IsTooManyRequests(err) { time.Sleep(5 * time.Second) } else { - errCh <- fmt.Errorf("error when evicting pod %q: %v", pod.Name, err) + errCh <- errors.Wrapf(err, "error when evicting pod %q", pod.Name) return } } @@ -186,7 +186,7 @@ func (o *drainOperation) evictPods(pods []v1.Pod, policyGroupVersion string) err if err == nil { doneCh <- true } else { - errCh <- fmt.Errorf("error when waiting for pod %q terminating: %v", pod.Name, err) + errCh <- errors.Wrapf(err, "error when waiting for pod %q terminating", pod.Name) } }(pod, doneCh, errCh) } @@ -202,7 +202,7 @@ func (o *drainOperation) evictPods(pods []v1.Pod, policyGroupVersion string) err return nil } case <-time.After(o.timeout): - return fmt.Errorf("Drain did not complete within %v", o.timeout) + return errors.Errorf("Drain did not complete within %v", o.timeout) } } } diff --git a/pkg/operations/deletevm.go b/pkg/operations/deletevm.go index e7be1cfa14..70893d1a14 100644 --- a/pkg/operations/deletevm.go +++ b/pkg/operations/deletevm.go @@ -5,6 +5,7 @@ import ( "github.com/Azure/acs-engine/pkg/armhelpers" "github.com/Azure/acs-engine/pkg/armhelpers/utils" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -27,7 +28,7 @@ func CleanDeleteVirtualMachine(az armhelpers.ACSEngineClient, logger *log.Entry, if vhd == nil && managedDisk == nil { logger.Errorf("failed to get a valid os disk URI for VM: %s/%s", resourceGroup, name) - return fmt.Errorf("os disk does not have a VHD URI") + return errors.New("os disk does not have a VHD URI") } osDiskName := vm.VirtualMachineProperties.StorageProfile.OsDisk.Name diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 0000000000..588ceca183 --- /dev/null +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,11 @@ 
+language: go +go_import_path: github.com/pkg/errors +go: + - 1.4.3 + - 1.5.4 + - 1.6.2 + - 1.7.1 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000000..835ba3e755 --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 0000000000..273db3c98a --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. + +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. 
+```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 0000000000..a932eade02 --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/bench_test.go b/vendor/github.com/pkg/errors/bench_test.go new file mode 100644 index 0000000000..0416a3cbb8 --- /dev/null +++ b/vendor/github.com/pkg/errors/bench_test.go @@ -0,0 +1,59 @@ +// +build go1.7 + +package errors + +import ( + "fmt" + "testing" + + stderrors "errors" +) + +func noErrors(at, depth int) error { + if at >= depth { + return stderrors.New("no error") + } + return noErrors(at+1, depth) +} +func yesErrors(at, depth int) error { + if at >= depth { + return New("ye error") + } + return yesErrors(at+1, depth) +} + +func BenchmarkErrors(b *testing.B) { + var toperr error + type run struct { + stack int + std bool + } + runs := []run{ + {10, false}, + {10, true}, + {100, false}, + {100, true}, + {1000, false}, + {1000, true}, + } + for _, r := range runs { + part := "pkg/errors" + if r.std { + part = "errors" + } + name := fmt.Sprintf("%s-stack-%d", part, r.stack) + b.Run(name, func(b *testing.B) { + var err error + f := yesErrors + if r.std { + f = noErrors + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err = f(0, r.stack) + } + b.StopTimer() + toperr = err + }) + } +} diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000000..842ee80456 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. 
+// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. 
+type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. 
+func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/errors_test.go b/vendor/github.com/pkg/errors/errors_test.go new file mode 100644 index 0000000000..1d8c635586 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors_test.go @@ -0,0 +1,226 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "reflect" + "testing" +) + +func TestNew(t *testing.T) { + tests := []struct { + err string + want error + }{ + {"", fmt.Errorf("")}, + {"foo", fmt.Errorf("foo")}, + {"foo", New("foo")}, + {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, + } + + for _, tt := range tests { + got := New(tt.err) + if got.Error() != tt.want.Error() { + t.Errorf("New.Error(): got: %q, want %q", got, tt.want) + } + } +} + +func TestWrapNil(t *testing.T) { + got := Wrap(nil, "no error") + if got != nil { + t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrap(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := Wrap(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +type nilError struct{} + +func (nilError) Error() string { return "nil error" } + +func TestCause(t *testing.T) { + x := New("error") + tests := []struct { + err error + want error + }{{ + // nil error is nil + err: nil, + want: nil, + }, { + // explicit nil error is nil + err: (error)(nil), + want: nil, + }, { + // typed nil is nil + err: (*nilError)(nil), + want: (*nilError)(nil), + }, { + // uncaused error is unaffected + err: io.EOF, + want: io.EOF, + }, { + // caused error returns cause + err: Wrap(io.EOF, "ignored"), + want: io.EOF, + }, { + err: x, // return from errors.New + want: x, + }, { + WithMessage(nil, "whoops"), + nil, + }, { + WithMessage(io.EOF, "whoops"), + io.EOF, + }, { + WithStack(nil), + nil, + }, { + WithStack(io.EOF), + io.EOF, + }} + + for i, tt := range tests { + got := Cause(tt.err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) + } + } +} + +func TestWrapfNil(t *testing.T) { + got := Wrapf(nil, "no error") + if got != nil { + t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrapf(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, + {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, + } + + for _, tt := range tests { + got := Wrapf(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +func TestErrorf(t *testing.T) { + tests := []struct { + err error + want string + }{ + {Errorf("read error without format specifiers"), "read error without format specifiers"}, + {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, + } + + for _, tt := range tests { + got := 
tt.err.Error() + if got != tt.want { + t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) + } + } +} + +func TestWithStackNil(t *testing.T) { + got := WithStack(nil) + if got != nil { + t.Errorf("WithStack(nil): got %#v, expected nil", got) + } +} + +func TestWithStack(t *testing.T) { + tests := []struct { + err error + want string + }{ + {io.EOF, "EOF"}, + {WithStack(io.EOF), "EOF"}, + } + + for _, tt := range tests { + got := WithStack(tt.err).Error() + if got != tt.want { + t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) + } + } +} + +func TestWithMessageNil(t *testing.T) { + got := WithMessage(nil, "no error") + if got != nil { + t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWithMessage(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := WithMessage(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) + } + } + +} + +// errors.New, etc values are not expected to be compared by value +// but the change in errors#27 made them incomparable. Assert that +// various kinds of errors have a functional equality operator, even +// if the result of that equality is always false. +func TestErrorEquality(t *testing.T) { + vals := []error{ + nil, + io.EOF, + errors.New("EOF"), + New("EOF"), + Errorf("EOF"), + Wrap(io.EOF, "EOF"), + Wrapf(io.EOF, "EOF%d", 2), + WithMessage(nil, "whoops"), + WithMessage(io.EOF, "whoops"), + WithStack(io.EOF), + WithStack(nil), + } + + for i := range vals { + for j := range vals { + _ = vals[i] == vals[j] // mustn't panic + } + } +} diff --git a/vendor/github.com/pkg/errors/example_test.go b/vendor/github.com/pkg/errors/example_test.go new file mode 100644 index 0000000000..c1fc13e384 --- /dev/null +++ b/vendor/github.com/pkg/errors/example_test.go @@ -0,0 +1,205 @@ +package errors_test + +import ( + "fmt" + + "github.com/pkg/errors" +) + +func ExampleNew() { + err := errors.New("whoops") + fmt.Println(err) + + // Output: whoops +} + +func ExampleNew_printf() { + err := errors.New("whoops") + fmt.Printf("%+v", err) + + // Example output: + // whoops + // github.com/pkg/errors_test.ExampleNew_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:17 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func ExampleWithMessage() { + cause := errors.New("whoops") + err := errors.WithMessage(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func ExampleWithStack() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Println(err) + + // Output: whoops +} + +func ExampleWithStack_printf() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Printf("%+v", err) + + // Example Output: + // whoops + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 + // 
testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 +} + +func ExampleWrap() { + cause := errors.New("whoops") + err := errors.Wrap(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func fn() error { + e1 := errors.New("error") + e2 := errors.Wrap(e1, "inner") + e3 := errors.Wrap(e2, "middle") + return errors.Wrap(e3, "outer") +} + +func ExampleCause() { + err := fn() + fmt.Println(err) + fmt.Println(errors.Cause(err)) + + // Output: outer: middle: inner: error + // error +} + +func ExampleWrap_extended() { + err := fn() + fmt.Printf("%+v\n", err) + + // Example output: + // error + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.ExampleCause_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:63 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:104 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer +} + +func ExampleWrapf() { + cause := errors.New("whoops") + err := errors.Wrapf(cause, "oh noes #%d", 2) + fmt.Println(err) + + // Output: oh noes #2: whoops +} + +func ExampleErrorf_extended() { + err := errors.Errorf("whoops: %s", "foo") + fmt.Printf("%+v", err) + + // Example output: + // whoops: foo + // github.com/pkg/errors_test.ExampleErrorf + // /home/dfc/src/github.com/pkg/errors/example_test.go:101 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:102 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func Example_stackTrace() { + type stackTracer interface { + StackTrace() errors.StackTrace + } + + err, ok := errors.Cause(fn()).(stackTracer) + if !ok { + panic("oops, err does not implement stackTracer") + } + + st 
:= err.StackTrace() + fmt.Printf("%+v", st[0:2]) // top two frames + + // Example output: + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.Example_stackTrace + // /home/dfc/src/github.com/pkg/errors/example_test.go:127 +} + +func ExampleCause_printf() { + err := errors.Wrap(func() error { + return func() error { + return errors.Errorf("hello %s", fmt.Sprintf("world")) + }() + }(), "failed") + + fmt.Printf("%v", err) + + // Output: failed: hello world +} diff --git a/vendor/github.com/pkg/errors/format_test.go b/vendor/github.com/pkg/errors/format_test.go new file mode 100644 index 0000000000..15fd7d89d7 --- /dev/null +++ b/vendor/github.com/pkg/errors/format_test.go @@ -0,0 +1,535 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "regexp" + "strings" + "testing" +) + +func TestFormatNew(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + New("error"), + "%s", + "error", + }, { + New("error"), + "%v", + "error", + }, { + New("error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatNew\n" + + "\t.+/github.com/pkg/errors/format_test.go:26", + }, { + New("error"), + "%q", + `"error"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatErrorf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Errorf("%s", "error"), + "%s", + "error", + }, { + Errorf("%s", "error"), + "%v", + "error", + }, { + Errorf("%s", "error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatErrorf\n" + + "\t.+/github.com/pkg/errors/format_test.go:56", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrap(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrap(New("error"), "error2"), + "%s", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%v", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:82", + }, { + Wrap(io.EOF, "error"), + "%s", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%v", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%+v", + "EOF\n" + + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:96", + }, { + Wrap(Wrap(io.EOF, "error1"), "error2"), + "%+v", + "EOF\n" + + "error1\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:103\n", + }, { + Wrap(New("error with space"), "context"), + "%q", + `"context: error with space"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrapf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrapf(io.EOF, "error%d", 2), + "%s", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%v", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%+v", + "EOF\n" + + "error2\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:134", + }, { + Wrapf(New("error"), "error%d", 2), + "%s", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%v", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%+v", 
+ "error\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:149", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWithStack(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithStack(io.EOF), + "%s", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%v", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:175"}, + }, { + WithStack(New("error")), + "%s", + []string{"error"}, + }, { + WithStack(New("error")), + "%v", + []string{"error"}, + }, { + WithStack(New("error")), + "%+v", + []string{"error", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189"}, + }, { + WithStack(WithStack(io.EOF)), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197"}, + }, { + WithStack(WithStack(Wrapf(io.EOF, "message"))), + "%+v", + []string{"EOF", + "message", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205"}, + }, { + WithStack(Errorf("error%d", 1)), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatWithMessage(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithMessage(New("error"), "error2"), + "%s", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%v", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%+v", + []string{ + "error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:244", + "error2"}, + }, { + WithMessage(io.EOF, "addition1"), + "%s", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%v", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%+v", + []string{"EOF", "addition1"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%v", + []string{"addition2: addition1: EOF"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%+v", + []string{"EOF", "addition1", "addition2"}, + }, { + Wrap(WithMessage(io.EOF, "error1"), "error2"), + "%+v", + []string{"EOF", "error1", "error2", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:272"}, + }, { + WithMessage(Errorf("error%d", 1), "error2"), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithMessage\n" + + 
"\t.+/github.com/pkg/errors/format_test.go:278", + "error2"}, + }, { + WithMessage(WithStack(io.EOF), "error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:285", + "error"}, + }, { + WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "inside-error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "outside-error"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatGeneric(t *testing.T) { + starts := []struct { + err error + want []string + }{ + {New("new-error"), []string{ + "new-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:315"}, + }, {Errorf("errorf-error"), []string{ + "errorf-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:319"}, + }, {errors.New("errors-new-error"), []string{ + "errors-new-error"}, + }, + } + + wrappers := []wrapper{ + { + func(err error) error { return WithMessage(err, "with-message") }, + []string{"with-message"}, + }, { + func(err error) error { return WithStack(err) }, + []string{ + "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + + ".+/github.com/pkg/errors/format_test.go:333", + }, + }, { + func(err error) error { return Wrap(err, "wrap-error") }, + []string{ + "wrap-error", + "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + + ".+/github.com/pkg/errors/format_test.go:339", + }, + }, { + func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, + []string{ + "wrapf-error1", + "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + + ".+/github.com/pkg/errors/format_test.go:346", + }, + }, + } + + for s := range starts { + err := starts[s].err + want := starts[s].want + testFormatCompleteCompare(t, s, err, "%+v", want, false) + testGenericRecursive(t, err, want, wrappers, 3) + } +} + +func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) { + got := fmt.Sprintf(format, arg) + gotLines := strings.SplitN(got, "\n", -1) + wantLines := strings.SplitN(want, "\n", -1) + + if len(wantLines) > len(gotLines) { + t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want) + return + } + + for i, w := range wantLines { + match, err := regexp.MatchString(w, gotLines[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want) + } + } +} + +var stackLineR = regexp.MustCompile(`\.`) + +// parseBlocks parses input into a slice, where: +// - incase entry contains a newline, its a stacktrace +// - incase entry contains no newline, its a solo line. +// +// Detecting stack boundaries only works incase the WithStack-calls are +// to be found on the same line, thats why it is optionally here. 
+// +// Example use: +// +// for _, e := range blocks { +// if strings.ContainsAny(e, "\n") { +// // Match as stack +// } else { +// // Match as line +// } +// } +// +func parseBlocks(input string, detectStackboundaries bool) ([]string, error) { + var blocks []string + + stack := "" + wasStack := false + lines := map[string]bool{} // already found lines + + for _, l := range strings.Split(input, "\n") { + isStackLine := stackLineR.MatchString(l) + + switch { + case !isStackLine && wasStack: + blocks = append(blocks, stack, l) + stack = "" + lines = map[string]bool{} + case isStackLine: + if wasStack { + // Detecting two stacks after another, possible cause lines match in + // our tests due to WithStack(WithStack(io.EOF)) on same line. + if detectStackboundaries { + if lines[l] { + if len(stack) == 0 { + return nil, errors.New("len of block must not be zero here") + } + + blocks = append(blocks, stack) + stack = l + lines = map[string]bool{l: true} + continue + } + } + + stack = stack + "\n" + l + } else { + stack = l + } + lines[l] = true + case !isStackLine && !wasStack: + blocks = append(blocks, l) + default: + return nil, errors.New("must not happen") + } + + wasStack = isStackLine + } + + // Use up stack + if stack != "" { + blocks = append(blocks, stack) + } + return blocks, nil +} + +func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { + gotStr := fmt.Sprintf(format, arg) + + got, err := parseBlocks(gotStr, detectStackBoundaries) + if err != nil { + t.Fatal(err) + } + + if len(got) != len(want) { + t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", + n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr) + } + + for i := range got { + if strings.ContainsAny(want[i], "\n") { + // Match as stack + match, err := regexp.MatchString(want[i], got[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", + n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) + } + } else { + // Match as message + if got[i] != want[i] { + t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) + } + } + } +} + +type wrapper struct { + wrap func(err error) error + want []string +} + +func prettyBlocks(blocks []string, prefix ...string) string { + var out []string + + for _, b := range blocks { + out = append(out, fmt.Sprintf("%v", b)) + } + + return " " + strings.Join(out, "\n ") +} + +func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { + if len(beforeWant) == 0 { + panic("beforeWant must not be empty") + } + for _, w := range list { + if len(w.want) == 0 { + panic("want must not be empty") + } + + err := w.wrap(beforeErr) + + // Copy required cause append(beforeWant, ..) modified beforeWant subtly. + beforeCopy := make([]string, len(beforeWant)) + copy(beforeCopy, beforeWant) + + beforeWant := beforeCopy + last := len(beforeWant) - 1 + var want []string + + // Merge two stacks behind each other. + if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { + want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) + } else { + want = append(beforeWant, w.want...) 
+ } + + testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) + if maxDepth > 0 { + testGenericRecursive(t, err, want, list, maxDepth-1) + } + } +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000000..6b1f2891a5 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. 
As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. + const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/vendor/github.com/pkg/errors/stack_test.go b/vendor/github.com/pkg/errors/stack_test.go new file mode 100644 index 0000000000..510c27a9f9 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack_test.go @@ -0,0 +1,292 @@ +package errors + +import ( + "fmt" + "runtime" + "testing" +) + +var initpc, _, _, _ = runtime.Caller(0) + +func TestFrameLine(t *testing.T) { + var tests = []struct { + Frame + want int + }{{ + Frame(initpc), + 9, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) + }(), + 20, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(1) + return Frame(pc) + }(), + 28, + }, { + Frame(0), // invalid PC + 0, + }} + + for _, tt := range tests { + got := tt.Frame.line() + want := tt.want + if want != got { + t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got) + } + } +} + +type X struct{} + +func (x X) val() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func (x *X) ptr() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func TestFrameFormat(t *testing.T) { + var tests = []struct { + Frame + format string + want string + }{{ + Frame(initpc), + "%s", + "stack_test.go", + }, { + Frame(initpc), + "%+s", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go", + }, { + Frame(0), + "%s", + "unknown", + }, { + Frame(0), + "%+s", + "unknown", + }, { + Frame(initpc), + "%d", + "9", + }, { + Frame(0), + "%d", + "0", + }, { + Frame(initpc), + "%n", + "init", + }, { + func() Frame { + var x X + return x.ptr() + }(), + "%n", + `\(\*X\).ptr`, + }, { + func() Frame { + var x X + return x.val() + }(), + "%n", + "X.val", + }, { + Frame(0), + "%n", + "", + }, { + Frame(initpc), + "%v", + "stack_test.go:9", + }, { + Frame(initpc), + "%+v", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go:9", + }, { + Frame(0), + "%v", + "unknown:0", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) + } +} + +func TestFuncname(t *testing.T) { + tests := []struct { + name, want string + }{ + {"", ""}, + {"runtime.main", "main"}, + 
{"github.com/pkg/errors.funcname", "funcname"}, + {"funcname", "funcname"}, + {"io.copyBuffer", "copyBuffer"}, + {"main.(*R).Write", "(*R).Write"}, + } + + for _, tt := range tests { + got := funcname(tt.name) + want := tt.want + if got != want { + t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) + } + } +} + +func TestTrimGOPATH(t *testing.T) { + var tests = []struct { + Frame + want string + }{{ + Frame(initpc), + "github.com/pkg/errors/stack_test.go", + }} + + for i, tt := range tests { + pc := tt.Frame.pc() + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + got := trimGOPATH(fn.Name(), file) + testFormatRegexp(t, i, got, "%s", tt.want) + } +} + +func TestStackTrace(t *testing.T) { + tests := []struct { + err error + want []string + }{{ + New("ooh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:172", + }, + }, { + Wrap(New("ooh"), "ahh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New + }, + }, { + Cause(Wrap(New("ooh"), "ahh")), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New + }, + }, { + func() error { return New("ooh") }(), []string{ + `github.com/pkg/errors.(func·009|TestStackTrace.func1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller + }, + }, { + Cause(func() error { + return func() error { + return Errorf("hello %s", fmt.Sprintf("world")) + }() + }()), []string{ + `github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf + `github.com/pkg/errors.(func·011|TestStackTrace.func2)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller + }, + }} + for i, tt := range tests { + x, ok := tt.err.(interface { + StackTrace() StackTrace + }) + if !ok { + t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) + continue + } + st := x.StackTrace() + for j, want := range tt.want { + testFormatRegexp(t, i, st[j], "%+v", want) + } + } +} + +func stackTrace() StackTrace { + const depth = 8 + var pcs [depth]uintptr + n := runtime.Callers(1, pcs[:]) + var st stack = pcs[0:n] + return st.StackTrace() +} + +func TestStackTraceFormat(t *testing.T) { + tests := []struct { + StackTrace + format string + want string + }{{ + nil, + "%s", + `\[\]`, + }, { + nil, + "%v", + `\[\]`, + }, { + nil, + "%+v", + "", + }, { + nil, + "%#v", + `\[\]errors.Frame\(nil\)`, + }, { + make(StackTrace, 0), + "%s", + `\[\]`, + }, { + make(StackTrace, 0), + "%v", + `\[\]`, + }, { + make(StackTrace, 0), + "%+v", + "", + }, { + make(StackTrace, 0), + "%#v", + `\[\]errors.Frame{}`, + }, { + stackTrace()[:2], + "%s", + `\[stack_test.go stack_test.go\]`, + }, { + stackTrace()[:2], + "%v", + `\[stack_test.go:225 stack_test.go:272\]`, + }, { + stackTrace()[:2], + "%+v", + "\n" + + "github.com/pkg/errors.stackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:225\n" + + 
"github.com/pkg/errors.TestStackTraceFormat\n" + + "\t.+/github.com/pkg/errors/stack_test.go:276", + }, { + stackTrace()[:2], + "%#v", + `\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) + } +} From 28cffc45f1c629b7f7a702f2b76ebac01e49ba78 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Tue, 3 Jul 2018 10:47:45 -0700 Subject: [PATCH 51/74] Bring the coverage of vlabs/validate to 90% (#3382) --- pkg/api/vlabs/validate.go | 5 +- pkg/api/vlabs/validate_test.go | 618 ++++++++++++++++++++++++++++++++- 2 files changed, 611 insertions(+), 12 deletions(-) diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 148dc1c740..229a7ec4e3 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -792,16 +792,13 @@ func (a *AgentPoolProfile) validateOrchestratorSpecificProperties(orchestratorTy return fmt.Errorf("VirtualMachineScaleSets does not support storage account attached disks. Instead specify 'StorageAccount': '%s' or specify AvailabilityProfile '%s'", ManagedDisks, AvailabilitySet) } } - if len(a.Ports) == 0 && len(a.DNSPrefix) > 0 { - return fmt.Errorf("AgentPoolProfile.Ports must be non empty when AgentPoolProfile.DNSPrefix is specified") - } return nil } func validateKeyVaultSecrets(secrets []KeyVaultSecrets, requireCertificateStore bool) error { for _, s := range secrets { if len(s.VaultCertificates) == 0 { - return fmt.Errorf("Invalid KeyVaultSecrets must have no empty VaultCertificates") + return fmt.Errorf("Valid KeyVaultSecrets must have no empty VaultCertificates") } if s.SourceVault == nil { return fmt.Errorf("missing SourceVault in KeyVaultSecrets") diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index 7a23cdb7b4..242d4cd970 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -520,6 +520,15 @@ func Test_KubernetesConfig_Validate(t *testing.T) { if err := c.Validate(k8sVersion, false); err != nil { t.Error("should not error when DNSServiceIP and ServiceCidr are valid") } + + c = KubernetesConfig{ + ClusterSubnet: "192.168.0.1/24", + NetworkPlugin: "azure", + } + + if err := c.Validate(k8sVersion, false); err == nil { + t.Error("should error when ClusterSubnet has a mask of 24 bits or higher") + } } // Tests that apply to 1.6 and later releases @@ -680,6 +689,103 @@ func Test_Properties_ValidateNetworkPluginPlusPolicy(t *testing.T) { } } +func TestProperties_ValidateLinuxProfile(t *testing.T) { + p := getK8sDefaultProperties(true) + p.LinuxProfile.SSH = struct { + PublicKeys []PublicKey `json:"publicKeys" validate:"required,len=1"` + }{ + PublicKeys: []PublicKey{{}}, + } + expectedMsg := "KeyData in LinuxProfile.SSH.PublicKeys cannot be empty string" + err := p.Validate(true) + + if err.Error() != expectedMsg { + t.Errorf("expected error message : %s to be thrown, but got : %s", expectedMsg, err.Error()) + } +} + +func TestProperties_ValidateInvalidExtensions(t *testing.T) { + + p := getK8sDefaultProperties(true) + p.OrchestratorProfile.OrchestratorVersion = "1.10.0" + + p.AgentPoolProfiles = []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: VirtualMachineScaleSets, + Extensions: []Extension{ + { + Name: "extensionName", + SingleOrAll: "single", + Template: "fakeTemplate", + }, + }, + }, + } + err := p.Validate(true) + expectedMsg := "Extensions are currently not supported with VirtualMachineScaleSets. 
Please specify \"availabilityProfile\": \"AvailabilitySet\"" + + if err.Error() != expectedMsg { + t.Errorf("expected error message : %s to be thrown, but got %s", expectedMsg, err.Error()) + } + +} + +func TestProperties_ValidateInvalidExtensionProfiles(t *testing.T) { + tests := []struct { + extensionProfiles []*ExtensionProfile + expectedErr error + }{ + { + extensionProfiles: []*ExtensionProfile{ + { + Name: "FakeExtensionProfile", + ExtensionParametersKeyVaultRef: &KeyvaultSecretRef{ + VaultID: "", + SecretName: "fakeSecret", + }, + }, + }, + expectedErr: errors.New("the Keyvault ID must be specified for Extension FakeExtensionProfile"), + }, + { + extensionProfiles: []*ExtensionProfile{ + { + Name: "FakeExtensionProfile", + ExtensionParametersKeyVaultRef: &KeyvaultSecretRef{ + VaultID: "fakeVaultID", + SecretName: "", + }, + }, + }, + expectedErr: errors.New("the Keyvault Secret must be specified for Extension FakeExtensionProfile"), + }, + { + extensionProfiles: []*ExtensionProfile{ + { + Name: "FakeExtensionProfile", + ExtensionParametersKeyVaultRef: &KeyvaultSecretRef{ + VaultID: "fakeVaultID", + SecretName: "fakeSecret", + }, + }, + }, + expectedErr: errors.New("Extension FakeExtensionProfile's keyvault secret reference is of incorrect format"), + }, + } + + for _, test := range tests { + p := getK8sDefaultProperties(true) + p.ExtensionProfiles = test.extensionProfiles + err := p.Validate(true) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Errorf("expected error with message : %s, but got %s", test.expectedErr.Error(), err.Error()) + } + } +} + func Test_ServicePrincipalProfile_ValidateSecretOrKeyvaultSecretRef(t *testing.T) { t.Run("ServicePrincipalProfile with secret should pass", func(t *testing.T) { @@ -780,12 +886,7 @@ func TestValidateKubernetesLabelKey(t *testing.T) { } func Test_AadProfile_Validate(t *testing.T) { - properties := &Properties{ - AADProfile: &AADProfile{}, - OrchestratorProfile: &OrchestratorProfile{ - OrchestratorType: Kubernetes, - }, - } + properties := getK8sDefaultProperties(false) t.Run("Valid aadProfile should pass", func(t *testing.T) { for _, aadProfile := range []*AADProfile{ { @@ -819,14 +920,79 @@ func Test_AadProfile_Validate(t *testing.T) { ServerAppID: "403f018b-4d89-495b-b548-0cf9868cdb0a", TenantID: "1", }, + { + ClientAppID: "92444486-5bc3-4291-818b-d53ae480991b", + ServerAppID: "403f018b-4d89-495b-b548-0cf9868cdb0a", + TenantID: "feb784f6-7174-46da-aeae-da66e80c7a11", + AdminGroupID: "1", + }, {}, } { properties.AADProfile = aadProfile - if err := properties.validateAADProfile(); err == nil { + if err := properties.Validate(true); err == nil { t.Errorf("error should have occurred") } } }) + + t.Run("aadProfiles should not be supported non-Kubernetes orchestrators", func(t *testing.T) { + properties.OrchestratorProfile = &OrchestratorProfile{ + OrchestratorType: OpenShift, + } + properties.AADProfile = &AADProfile{ + ClientAppID: "92444486-5bc3-4291-818b-d53ae480991b", + ServerAppID: "403f018b-4d89-495b-b548-0cf9868cdb0a", + } + expectedMsg := "'aadProfile' is only supported by orchestrator 'Kubernetes'" + if err := properties.validateAADProfile(); err == nil || err.Error() != expectedMsg { + t.Errorf("error should have occurred with msg : %s, but got : %s", expectedMsg, err.Error()) + } + }) +} + +func TestValidateProperties_AzProfile(t *testing.T) { + p := getK8sDefaultProperties(false) + + t.Run("It returns error for unsupported orchestratorTypes", func(t *testing.T) { + p.OrchestratorProfile = &OrchestratorProfile{ + 
OrchestratorType: Kubernetes, + } + p.AzProfile = &AzProfile{ + TenantID: "tenant_id", + SubscriptionID: "sub_id", + ResourceGroup: "rg1", + } + expectedMsg := "'azProfile' is only supported by orchestrator 'OpenShift'" + if err := p.Validate(false); err == nil || err.Error() != expectedMsg { + t.Errorf("expected error to be thrown with message : %s", expectedMsg) + } + }) + + t.Run("It should return an error for incomplete azProfile details", func(t *testing.T) { + p.OrchestratorProfile = &OrchestratorProfile{ + OrchestratorType: OpenShift, + OpenShiftConfig: validOpenShiftConifg(), + } + p.AzProfile = &AzProfile{ + TenantID: "tenant_id", + SubscriptionID: "sub_id", + ResourceGroup: "", + } + expectedMsg := "'azProfile' must be supplied in full for orchestrator 'OpenShift'" + if err := p.validateAzProfile(); err == nil || err.Error() != expectedMsg { + t.Errorf("expected error to be thrown with message : %s", err.Error()) + } + }) + +} + +func TestProperties_ValidateInvalidStruct(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile = &OrchestratorProfile{} + expectedMsg := "missing Properties.OrchestratorProfile.OrchestratorType" + if err := p.Validate(false); err == nil || err.Error() != expectedMsg { + t.Errorf("expected validation error with message : %s", err.Error()) + } } func getK8sDefaultProperties(hasWindows bool) *Properties { @@ -1082,8 +1248,18 @@ func TestValidateImageNameAndGroup(t *testing.T) { }, } + p := getK8sDefaultProperties(true) for _, test := range tests { - gotErr := test.image.validateImageNameAndGroup() + p.AgentPoolProfiles = []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + ImageRef: &test.image, + }, + } + gotErr := p.validateAgentPoolProfiles() if !reflect.DeepEqual(gotErr, test.expectedErr) { t.Logf("scenario %q", test.name) t.Errorf("expected error: %v, got: %v", test.expectedErr, gotErr) @@ -1147,6 +1323,18 @@ func TestMasterProfileValidate(t *testing.T) { Count: 1, }, }, + { + orchestratorType: Kubernetes, + masterProfile: MasterProfile{ + DNSPrefix: "dummy", + Count: 3, + ImageRef: &ImageReference{ + Name: "", + ResourceGroup: "rg", + }, + }, + expectedErr: "imageName needs to be specified when imageResourceGroup is provided", + }, } for i, test := range tests { @@ -1164,6 +1352,155 @@ func TestMasterProfileValidate(t *testing.T) { } } +func TestProperties_ValidateAddon(t *testing.T) { + p := getK8sDefaultProperties(true) + p.AgentPoolProfiles = []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_NC6", + Count: 1, + AvailabilityProfile: AvailabilitySet, + }, + } + p.OrchestratorProfile.OrchestratorVersion = "1.9.0" + p.OrchestratorProfile.KubernetesConfig = &KubernetesConfig{ + Addons: []KubernetesAddon{ + { + Name: "nvidia-device-plugin", + Enabled: &trueVal, + }, + }, + } + + err := p.Validate(true) + expectedMsg := "NVIDIA Device Plugin add-on can only be used Kubernetes 1.10 or above. 
Please specify \"orchestratorRelease\": \"1.10\"" + if err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got : %s", expectedMsg, err.Error()) + } +} + +func TestProperties_ValidateVNET(t *testing.T) { + validVNetSubnetID := "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME" + validVNetSubnetID2 := "/subscriptions/SUB_ID2/resourceGroups/RG_NAME2/providers/Microsoft.Network/virtualNetworks/VNET_NAME2/subnets/SUBNET_NAME" + + p := getK8sDefaultProperties(true) + tests := []struct { + masterProfile *MasterProfile + agentPoolProfiles []*AgentPoolProfile + expectedMsg string + }{ + { + masterProfile: &MasterProfile{ + VnetSubnetID: "testvnetstring", + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: "", + }, + }, + expectedMsg: "Multiple VNET Subnet configurations specified. The master profile and each agent pool profile must all specify a custom VNET Subnet, or none at all", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: "testvnetstring", + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: "testvnetstring", + }, + }, + expectedMsg: "Unable to parse vnetSubnetID. Please use a vnetSubnetID with format /subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: validVNetSubnetID, + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID, + }, + { + Name: "agentpool2", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID2, + }, + }, + expectedMsg: "Multiple VNETS specified. 
The master profile and each agent pool must reference the same VNET (but it is ok to reference different subnets on that VNET)", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: validVNetSubnetID, + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + FirstConsecutiveStaticIP: "10.0.0.invalid", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID, + }, + }, + expectedMsg: "MasterProfile.FirstConsecutiveStaticIP (with VNET Subnet specification) '10.0.0.invalid' is an invalid IP address", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: validVNetSubnetID, + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + FirstConsecutiveStaticIP: "10.0.0.1", + VnetCidr: "10.1.0.0/invalid", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID, + }, + }, + expectedMsg: "MasterProfile.VnetCidr '10.1.0.0/invalid' contains invalid cidr notation", + }, + } + + for _, test := range tests { + p.MasterProfile = test.masterProfile + p.AgentPoolProfiles = test.agentPoolProfiles + err := p.Validate(true) + if err.Error() != test.expectedMsg { + t.Errorf("expected error message : %s, but got %s", test.expectedMsg, err.Error()) + } + } +} + func TestOpenshiftValidate(t *testing.T) { tests := []struct { name string @@ -1324,6 +1661,32 @@ func TestOpenshiftValidate(t *testing.T) { } } +func TestWindowsProfile_Validate(t *testing.T) { + w := &WindowsProfile{} + w.WindowsImageSourceURL = "http://fakeWindowsImageSourceURL" + err := w.Validate("Mesos") + expectedMsg := "Windows Custom Images are only supported if the Orchestrator Type is DCOS or Kubernetes" + if err.Error() != expectedMsg { + t.Errorf("should error on unsupported orchType with msg : %s, but got : %s", expectedMsg, err.Error()) + } + + w.AdminUsername = "" + w.AdminPassword = "password" + err = w.Validate(Kubernetes) + expectedMsg = "WindowsProfile.AdminUsername is required, when agent pool specifies windows" + if err.Error() != expectedMsg { + t.Errorf("should error on unsupported orchType with msg : %s, but got : %s", expectedMsg, err.Error()) + } + + w.AdminUsername = "azureuser" + w.AdminPassword = "" + err = w.Validate(Kubernetes) + expectedMsg = "WindowsProfile.AdminPassword is required, when agent pool specifies windows" + if err.Error() != expectedMsg { + t.Errorf("should error on unsupported orchType with msg : %s, but got : %s", expectedMsg, err.Error()) + } +} + // validOpenShiftConifg returns a valid OpenShift config that can be use for validation tests. 
func validOpenShiftConifg() *OpenShiftConfig { return &OpenShiftConfig{ @@ -1413,3 +1776,242 @@ func TestValidateAgentPoolProfiles(t *testing.T) { } } } + +func TestValidate_VaultKeySecrets(t *testing.T) { + + tests := []struct { + secrets []KeyVaultSecrets + expectedErr error + }{ + { + secrets: []KeyVaultSecrets{ + { + SourceVault: &KeyVaultID{ + ID: "0a0b0c0d0e0f", + }, + VaultCertificates: []KeyVaultCertificate{}, + }, + }, + expectedErr: errors.New("Valid KeyVaultSecrets must have no empty VaultCertificates"), + }, + { + secrets: []KeyVaultSecrets{ + { + SourceVault: &KeyVaultID{}, + VaultCertificates: []KeyVaultCertificate{ + { + CertificateURL: "dummyURL", + CertificateStore: "dummyCertStore", + }, + }, + }, + }, + expectedErr: errors.New("KeyVaultSecrets must have a SourceVault.ID"), + }, + { + secrets: []KeyVaultSecrets{ + { + VaultCertificates: []KeyVaultCertificate{ + { + CertificateURL: "dummyURL", + CertificateStore: "dummyCertStore", + }, + }, + }, + }, + expectedErr: errors.New("missing SourceVault in KeyVaultSecrets"), + }, + { + secrets: []KeyVaultSecrets{ + { + SourceVault: &KeyVaultID{ + ID: "0a0b0c0d0e0f", + }, + VaultCertificates: []KeyVaultCertificate{ + { + CertificateURL: "dummyUrl", + CertificateStore: "", + }, + }, + }, + }, + expectedErr: errors.New("KeyVaultCertificate.CertificateStore must be a non-empty value for certificates in a WindowsProfile"), + }, + } + + for _, test := range tests { + err := validateKeyVaultSecrets(test.secrets, true) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Errorf("expected error to be thrown with msg : %s", test.expectedErr.Error()) + } + } +} + +func TestValidateProperties_OrchestratorSpecificProperties(t *testing.T) { + t.Run("Should not support DNS prefix for Kubernetes orchestrators", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DNSPrefix = "sampleprefix" + expectedMsg := "AgentPoolProfile.DNSPrefix must be empty for Kubernetes" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s", expectedMsg) + } + }) + + t.Run("Should not contain agentPool ports for Kubernetes orchestrators", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{80, 443, 8080} + expectedMsg := "AgentPoolProfile.Ports must be empty for Kubernetes" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not support ScaleSetEviction policies with regular priority", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{} + agentPoolProfiles[0].ScaleSetPriority = "Regular" + agentPoolProfiles[0].ScaleSetEvictionPolicy = "Deallocate" + expectedMsg := "property 'AgentPoolProfile.ScaleSetEvictionPolicy' must be empty for AgentPoolProfile.Priority of Regular" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain a valid DNS prefix", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DNSPrefix = "invalid_prefix" + expectedMsg := "DNSPrefix 'invalid_prefix' is 
invalid. The DNSPrefix must contain between 3 and 45 characters and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. (length was 14)" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not contain ports when DNS prefix is empty", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{80, 443} + expectedMsg := "AgentPoolProfile.Ports must be empty when AgentPoolProfile.DNSPrefix is empty for Orchestrator: OpenShift" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain unique ports", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{80, 443, 80} + agentPoolProfiles[0].DNSPrefix = "sampleprefix" + expectedMsg := "agent profile 'agentpool' has duplicate port '80', ports must be unique" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain valid Storage Profile", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DiskSizesGB = []int{512, 256, 768} + agentPoolProfiles[0].DNSPrefix = "sampleprefix" + expectedMsg := "property 'StorageProfile' must be set to either 'StorageAccount' or 'ManagedDisks' when attaching disks" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain valid Availability Profile", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DiskSizesGB = []int{512, 256, 768} + agentPoolProfiles[0].StorageProfile = "ManagedDisks" + agentPoolProfiles[0].AvailabilityProfile = "InvalidAvailabilityProfile" + expectedMsg := "property 'AvailabilityProfile' must be set to either 'VirtualMachineScaleSets' or 'AvailabilitySet' when attaching disks" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not support both VirtualMachineScaleSets and StorageAccount", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DiskSizesGB = []int{512, 256, 768} + agentPoolProfiles[0].StorageProfile = "StorageAccount" + agentPoolProfiles[0].AvailabilityProfile = "VirtualMachineScaleSets" + expectedMsg := "VirtualMachineScaleSets does not support storage account attached disks. 
Instead specify 'StorageAccount': 'ManagedDisks' or specify AvailabilityProfile 'AvailabilitySet'" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) +} + +func TestValidateProperties_CustomNodeLabels(t *testing.T) { + + t.Run("Should throw error for invalid Kubernetes Label Keys", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].CustomNodeLabels = map[string]string{ + "a/b/c": "a", + } + expectedMsg := "Label key 'a/b/c' is invalid. Valid label keys have two segments: an optional prefix and name, separated by a slash (/). The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (.), not longer than 253 characters in total, followed by a slash (/)" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should throw error for invalid Kubernetes Label Values", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].CustomNodeLabels = map[string]string{ + "fookey": "b$$a$$r", + } + expectedMsg := "Label value 'b$$a$$r' is invalid. Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not support orchestratorTypes other than Kubernetes/DCOS", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = SwarmMode + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].CustomNodeLabels = map[string]string{ + "foo": "bar", + } + expectedMsg := "Agent CustomNodeLabels are only supported for DCOS and Kubernetes" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) +} + +func TestAgentPoolProfile_ValidateAvailabilityProfile(t *testing.T) { + t.Run("Should fail for invalid availability profile", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].AvailabilityProfile = "InvalidAvailabilityProfile" + expectedMsg := "unknown availability profile type 'InvalidAvailabilityProfile' for agent pool 'agentpool'. 
Specify either AvailabilitySet, or VirtualMachineScaleSets" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should fail when using VirtualMachineScalesets with Openshift", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].AvailabilityProfile = VirtualMachineScaleSets + expectedMsg := "Only AvailabilityProfile: AvailabilitySet is supported for Orchestrator 'OpenShift'" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) +} From 91150cdcb21c3b38157236c2b345a894f46b43a9 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Tue, 3 Jul 2018 15:57:16 -0700 Subject: [PATCH 52/74] fix doc (#3411) --- docs/clusterdefinition.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 3d76cd3704..040ea7136e 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -42,7 +42,7 @@ Here are the valid values for the orchestrator types: | dnsServiceIP | no | IP address for kube-dns to listen on. If specified must be in the range of `serviceCidr` | | dockerBridgeSubnet | no | The specific IP and subnet used for allocating IP addresses for the docker bridge network created on the kubernetes master and agents. Default value is 172.17.0.1/16. This value is used to configure the docker daemon using the [--bip flag](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0) | | dockerEngineVersion | no | Which version of docker-engine to use in your cluster, e.g. "17.03.\*" | -| enableAggregatedAPIs | no | Enable [Kubernetes Aggregated APIs](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/).This is required by [Service Catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md). (boolean - default == false) | +| enableAggregatedAPIs | no | Enable [Kubernetes Aggregated APIs](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/).This is required by [Service Catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md). (boolean - default is true for k8s versions greater or equal to 1.9.0, false otherwise) | | enableDataEncryptionAtRest | no | Enable [kubernetes data encryption at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | | enableEncryptionWithExternalKms | no | Enable [kubernetes data encryption at rest with external KMS](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | | enablePodSecurityPolicy | no | Enable [kubernetes pod security policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).This is currently a beta feature. 
(boolean - default == false) | From 3d7ac0eaf4522ed2770ac9029e186153fa1939ed Mon Sep 17 00:00:00 2001 From: Yongli Chen Date: Tue, 3 Jul 2018 17:00:08 -0700 Subject: [PATCH 53/74] update azure-npm to version v0.0.3 (#3410) --- .../k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml index 67110032ec..21e30e993f 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml @@ -73,7 +73,7 @@ spec: operator: Exists containers: - name: azure-npm - image: containernetworking/azure-npm:v0.0.2 + image: containernetworking/azure-npm:v0.0.3 securityContext: privileged: true env: From 09351bbedf1cd8385217ebe8fe5a241e316d30c3 Mon Sep 17 00:00:00 2001 From: Patrick Lang Date: Tue, 3 Jul 2018 17:04:40 -0700 Subject: [PATCH 54/74] Adding test for and fixing daemonsets that should not run on Windows (#3407) --- parts/k8s/addons/azure-cni-networkmonitor.yaml | 2 ++ ...kubernetesmasteraddons-azure-npm-daemonset.yaml | 4 +++- .../kubernetesmasteraddons-calico-daemonset.yaml | 2 ++ .../kubernetesmasteraddons-flannel-daemonset.yaml | 1 + ...esmasteraddons-kube-rescheduler-deployment.yaml | 2 ++ test/e2e/kubernetes/kubernetes_test.go | 14 ++++++++++++++ 6 files changed, 24 insertions(+), 1 deletion(-) diff --git a/parts/k8s/addons/azure-cni-networkmonitor.yaml b/parts/k8s/addons/azure-cni-networkmonitor.yaml index 7bc3127e61..390044fcc7 100644 --- a/parts/k8s/addons/azure-cni-networkmonitor.yaml +++ b/parts/k8s/addons/azure-cni-networkmonitor.yaml @@ -20,6 +20,8 @@ spec: tolerations: - key: CriticalAddonsOnly operator: Exists + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: azure-cnms image: diff --git a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml index 21e30e993f..06ba15756d 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml @@ -71,6 +71,8 @@ spec: tolerations: - key: CriticalAddonsOnly operator: Exists + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: azure-npm image: containernetworking/azure-npm:v0.0.3 @@ -98,4 +100,4 @@ spec: hostPath: path: /run/xtables.lock type: File - serviceAccountName: azure-npm + serviceAccountName: azure-npm \ No newline at end of file diff --git a/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml index 85d3bd4d5a..c53052ec8d 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml @@ -210,6 +210,8 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: hostNetwork: true + nodeSelector: + beta.kubernetes.io/os: linux tolerations: - key: CriticalAddonsOnly operator: Exists diff --git a/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml index 3180d73e51..b9c2f956e8 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml @@ -56,6 +56,7 @@ spec: hostNetwork: true nodeSelector: beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux tolerations: - 
key: node-role.kubernetes.io/master operator: Equal diff --git a/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml b/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml index fc62947df0..64d5c688a4 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml @@ -19,6 +19,8 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - image: name: rescheduler diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 1dbb7152ef..5007be4418 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -777,6 +777,20 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } }) + It("Should not have any unready or crashing pods right after deployment", func() { + if eng.HasWindowsAgents() { + By("Checking ready status of each pod in kube-system") + pods, err := pod.GetAll("kube-system") + Expect(err).NotTo(HaveOccurred()) + Expect(len(pods.Pods)).ToNot(BeZero()) + for _, currentPod := range pods.Pods { + log.Printf("Checking %s", currentPod.Metadata.Name) + Expect(currentPod.Status.ContainerStatuses[0].Ready).To(BeTrue()) + Expect(currentPod.Status.ContainerStatuses[0].RestartCount).To(BeNumerically("<", 3)) + } + } + }) + // Windows Bug 16598869 /* It("should be able to reach hostport in an iis webserver", func() { From 2a15ee08089f73321c2c6e65781be47ed6151c58 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 5 Jul 2018 10:54:53 -0700 Subject: [PATCH 55/74] remove version check from k8s e2e (#3419) --- test/e2e/kubernetes/kubernetes_test.go | 25 ------------------------- test/e2e/kubernetes/node/node.go | 2 +- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 5007be4418..945330101d 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -8,7 +8,6 @@ import ( "os/exec" "path/filepath" "regexp" - "strings" "time" "github.com/Azure/acs-engine/pkg/api/common" @@ -165,30 +164,6 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } }) - It("should be running the expected version", func() { - hasWindows := eng.HasWindowsAgents() - version, err := node.Version() - Expect(err).NotTo(HaveOccurred()) - - var expectedVersion string - if eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorRelease != "" || - eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorVersion != "" { - expectedVersion = common.RationalizeReleaseAndVersion( - common.Kubernetes, - eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorRelease, - eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorVersion, - hasWindows) - } else { - expectedVersion = common.RationalizeReleaseAndVersion( - common.Kubernetes, - eng.Config.OrchestratorRelease, - eng.Config.OrchestratorVersion, - hasWindows) - } - expectedVersionRationalized := strings.Split(expectedVersion, "-")[0] // to account for -alpha and -beta suffixes - Expect(version).To(Equal("v" + expectedVersionRationalized)) - }) - It("should have kube-dns running", func() { running, err := pod.WaitOnReady("kube-dns", "kube-system", 3, 30*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) diff --git 
a/test/e2e/kubernetes/node/node.go b/test/e2e/kubernetes/node/node.go index 8c7d649544..f57215a3f4 100644 --- a/test/e2e/kubernetes/node/node.go +++ b/test/e2e/kubernetes/node/node.go @@ -15,7 +15,7 @@ import ( const ( //ServerVersion is used to parse out the version of the API running - ServerVersion = `(Server Version:\s)+(v\d+.\d+.\d+)+` + ServerVersion = `(Server Version:\s)+(.*)` ) // Node represents the kubernetes Node Resource From a8cd0a4cb1804346df7fc4f6a79f5fd32d7f1114 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Thu, 5 Jul 2018 10:58:23 -0700 Subject: [PATCH 56/74] Unit tests (#3344) --- cmd/root.go | 39 ++++--- cmd/root_test.go | 21 ++++ pkg/acsengine/defaults-apiserver_test.go | 109 ++++-------------- pkg/acsengine/defaults-kubelet_test.go | 30 ++--- pkg/acsengine/defaults-scheduler_test.go | 6 +- pkg/acsengine/mocks.go | 90 +++++++++++++++ pkg/acsengine/output.go | 3 + pkg/acsengine/output_test.go | 83 +++++++++++++ .../kubernetesupgrade/upgradecluster_test.go | 72 ++---------- 9 files changed, 267 insertions(+), 186 deletions(-) create mode 100644 cmd/root_test.go create mode 100644 pkg/acsengine/mocks.go create mode 100644 pkg/acsengine/output_test.go diff --git a/cmd/root.go b/cmd/root.go index a95c6578f8..d6b2dd6686 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -45,24 +45,7 @@ func NewRootCmd() *cobra.Command { rootCmd.AddCommand(newUpgradeCmd()) rootCmd.AddCommand(newScaleCmd()) rootCmd.AddCommand(newDcosUpgradeCmd()) - - var completionCmd = &cobra.Command{ - Use: "completion", - Short: "Generates bash completion scripts", - Long: `To load completion run - - source <(acs-engine completion) - - To configure your bash shell to load completions for each session, add this to your bashrc - - # ~/.bashrc or ~/.profile - source <(acs-engine completion) - `, - Run: func(cmd *cobra.Command, args []string) { - rootCmd.GenBashCompletion(os.Stdout) - }, - } - rootCmd.AddCommand(completionCmd) + rootCmd.AddCommand(getCompletionCmd(rootCmd)) return rootCmd } @@ -144,3 +127,23 @@ func (authArgs *authArgs) getClient() (*armhelpers.AzureClient, error) { client.AddAcceptLanguages([]string{authArgs.language}) return client, nil } + +func getCompletionCmd(root *cobra.Command) *cobra.Command { + var completionCmd = &cobra.Command{ + Use: "completion", + Short: "Generates bash completion scripts", + Long: `To load completion run + + source <(acs-engine completion) + + To configure your bash shell to load completions for each session, add this to your bashrc + + # ~/.bashrc or ~/.profile + source <(acs-engine completion) + `, + Run: func(cmd *cobra.Command, args []string) { + root.GenBashCompletion(os.Stdout) + }, + } + return completionCmd +} diff --git a/cmd/root_test.go b/cmd/root_test.go new file mode 100644 index 0000000000..fb63c7ed49 --- /dev/null +++ b/cmd/root_test.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "testing" + + "github.com/spf13/cobra" +) + +func TestNewRootCmd(t *testing.T) { + output := NewRootCmd() + if output.Use != rootName || output.Short != rootShortDescription || output.Long != rootLongDescription { + t.Fatalf("root command should have use %s equal %s, short %s equal %s and long %s equal to %s", output.Use, rootName, output.Short, rootShortDescription, output.Long, rootLongDescription) + } + expectedCommands := []*cobra.Command{getCompletionCmd(output), newDcosUpgradeCmd(), newDeployCmd(), newGenerateCmd(), newOrchestratorsCmd(), newScaleCmd(), newUpgradeCmd(), newVersionCmd()} + rc := output.Commands() + for i, c := range expectedCommands { 
+ if rc[i].Use != c.Use { + t.Fatalf("root command should have command %s", c.Use) + } + } +} diff --git a/pkg/acsengine/defaults-apiserver_test.go b/pkg/acsengine/defaults-apiserver_test.go index 7ea3d972d8..3e69c4dfee 100644 --- a/pkg/acsengine/defaults-apiserver_test.go +++ b/pkg/acsengine/defaults-apiserver_test.go @@ -5,14 +5,13 @@ import ( "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/acs-engine/pkg/helpers" - "github.com/satori/go.uuid" ) const defaultTestClusterVer = "1.7.12" func TestAPIServerConfigEnableDataEncryptionAtRest(t *testing.T) { // Test EnableDataEncryptionAtRest = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableDataEncryptionAtRest = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -22,7 +21,7 @@ func TestAPIServerConfigEnableDataEncryptionAtRest(t *testing.T) { } // Test EnableDataEncryptionAtRest = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableDataEncryptionAtRest = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -34,7 +33,7 @@ func TestAPIServerConfigEnableDataEncryptionAtRest(t *testing.T) { func TestAPIServerConfigEnableEncryptionWithExternalKms(t *testing.T) { // Test EnableEncryptionWithExternalKms = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -44,7 +43,7 @@ func TestAPIServerConfigEnableEncryptionWithExternalKms(t *testing.T) { } // Test EnableEncryptionWithExternalKms = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -56,7 +55,7 @@ func TestAPIServerConfigEnableEncryptionWithExternalKms(t *testing.T) { func TestAPIServerConfigEnableAggregatedAPIs(t *testing.T) { // Test EnableAggregatedAPIs = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs = true setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -90,7 +89,7 @@ func TestAPIServerConfigEnableAggregatedAPIs(t *testing.T) { } // Test EnableAggregatedAPIs = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs = false setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -106,7 
+105,7 @@ func TestAPIServerConfigEnableAggregatedAPIs(t *testing.T) { func TestAPIServerConfigUseCloudControllerManager(t *testing.T) { // Test UseCloudControllerManager = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -120,7 +119,7 @@ func TestAPIServerConfigUseCloudControllerManager(t *testing.T) { } // Test UseCloudControllerManager = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -136,7 +135,7 @@ func TestAPIServerConfigUseCloudControllerManager(t *testing.T) { func TestAPIServerConfigHasAadProfile(t *testing.T) { // Test HasAadProfile = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.AADProfile = &api.AADProfile{ ServerAppID: "test-id", TenantID: "test-tenant", @@ -161,7 +160,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { } // Test OIDC user overrides - cs = createContainerService("testcluster", "1.7.12", 3, 2) + cs = CreateMockContainerService("testcluster", "1.7.12", 3, 2, false) cs.Properties.AADProfile = &api.AADProfile{ ServerAppID: "test-id", TenantID: "test-tenant", @@ -196,7 +195,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { } // Test China Cloud settings - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.AADProfile = &api.AADProfile{ ServerAppID: "test-id", TenantID: "test-tenant", @@ -234,7 +233,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { } // Test HasAadProfile = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig for _, key := range []string{"--oidc-username-claim", "--oidc-groups-claim", "--oidc-client-id", "--oidc-issuer-url"} { @@ -247,7 +246,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { func TestAPIServerConfigEnableRbac(t *testing.T) { // Test EnableRbac = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -257,7 +256,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { } // Test EnableRbac = true with 1.6 cluster - cs = createContainerService("testcluster", "1.6.11", 3, 2) + cs = CreateMockContainerService("testcluster", "1.6.11", 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(true) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig 
@@ -267,7 +266,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { } // Test EnableRbac = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -277,7 +276,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { } // Test EnableRbac = false with 1.6 cluster - cs = createContainerService("testcluster", "1.6.11", 3, 2) + cs = CreateMockContainerService("testcluster", "1.6.11", 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -289,7 +288,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { func TestAPIServerConfigEnableSecureKubelet(t *testing.T) { // Test EnableSecureKubelet = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -303,7 +302,7 @@ func TestAPIServerConfigEnableSecureKubelet(t *testing.T) { } // Test EnableSecureKubelet = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -320,7 +319,7 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { version := "1.10.0" enableAdmissionPluginsKey := "--enable-admission-plugins" admissonControlKey := "--admission-control" - cs := createContainerService("testcluster", version, 3, 2) + cs := CreateMockContainerService("testcluster", version, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig = map[string]string{} cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig[admissonControlKey] = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages,ExtendedResourceToleration" setAPIServerConfig(cs) @@ -338,7 +337,7 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { // Test --admission-control for v1.9 and below version = "1.9.0" - cs = createContainerService("testcluster", version, 3, 2) + cs = CreateMockContainerService("testcluster", version, 3, 2, false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -352,71 +351,3 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { t.Fatalf("Admission control key '%s' not set in API server config for version %s", enableAdmissionPluginsKey, version) } } - -func createContainerService(containerServiceName string, orchestratorVersion string, masterCount int, agentCount int) *api.ContainerService { - cs := api.ContainerService{} - cs.ID = uuid.NewV4().String() - cs.Location = "eastus" - cs.Name = containerServiceName - - cs.Properties = &api.Properties{} - - 
cs.Properties.MasterProfile = &api.MasterProfile{} - cs.Properties.MasterProfile.Count = masterCount - cs.Properties.MasterProfile.DNSPrefix = "testmaster" - cs.Properties.MasterProfile.VMSize = "Standard_D2_v2" - - cs.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{} - agentPool := &api.AgentPoolProfile{} - agentPool.Count = agentCount - agentPool.Name = "agentpool1" - agentPool.VMSize = "Standard_D2_v2" - agentPool.OSType = "Linux" - agentPool.AvailabilityProfile = "AvailabilitySet" - agentPool.StorageProfile = "StorageAccount" - - cs.Properties.AgentPoolProfiles = append(cs.Properties.AgentPoolProfiles, agentPool) - - cs.Properties.LinuxProfile = &api.LinuxProfile{ - AdminUsername: "azureuser", - SSH: struct { - PublicKeys []api.PublicKey `json:"publicKeys"` - }{}, - } - - cs.Properties.LinuxProfile.AdminUsername = "azureuser" - cs.Properties.LinuxProfile.SSH.PublicKeys = append( - cs.Properties.LinuxProfile.SSH.PublicKeys, api.PublicKey{KeyData: "test"}) - - cs.Properties.ServicePrincipalProfile = &api.ServicePrincipalProfile{} - cs.Properties.ServicePrincipalProfile.ClientID = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - cs.Properties.ServicePrincipalProfile.Secret = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - - cs.Properties.OrchestratorProfile = &api.OrchestratorProfile{} - cs.Properties.OrchestratorProfile.OrchestratorType = api.Kubernetes - cs.Properties.OrchestratorProfile.OrchestratorVersion = orchestratorVersion - cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{ - EnableSecureKubelet: helpers.PointerToBool(api.DefaultSecureKubeletEnabled), - EnableRbac: helpers.PointerToBool(api.DefaultRBACEnabled), - EtcdDiskSizeGB: DefaultEtcdDiskSize, - ServiceCIDR: DefaultKubernetesServiceCIDR, - DockerBridgeSubnet: DefaultDockerBridgeSubnet, - DNSServiceIP: DefaultKubernetesDNSServiceIP, - GCLowThreshold: DefaultKubernetesGCLowThreshold, - GCHighThreshold: DefaultKubernetesGCHighThreshold, - MaxPods: DefaultKubernetesMaxPodsVNETIntegrated, - ClusterSubnet: DefaultKubernetesSubnet, - ContainerRuntime: DefaultContainerRuntime, - NetworkPlugin: DefaultNetworkPlugin, - NetworkPolicy: DefaultNetworkPolicy, - EtcdVersion: DefaultEtcdVersion, - KubeletConfig: make(map[string]string), - } - - cs.Properties.CertificateProfile = &api.CertificateProfile{} - cs.Properties.CertificateProfile.CaCertificate = "cacert" - cs.Properties.CertificateProfile.KubeConfigCertificate = "kubeconfigcert" - cs.Properties.CertificateProfile.KubeConfigPrivateKey = "kubeconfigkey" - - return &cs -} diff --git a/pkg/acsengine/defaults-kubelet_test.go b/pkg/acsengine/defaults-kubelet_test.go index 7a8e68783f..b2c51d78c3 100644 --- a/pkg/acsengine/defaults-kubelet_test.go +++ b/pkg/acsengine/defaults-kubelet_test.go @@ -8,7 +8,7 @@ import ( ) func TestKubeletConfigDefaults(t *testing.T) { - cs := createContainerService("testcluster", "1.8.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.8.6", 3, 2, false) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig // TODO test all default config values @@ -22,7 +22,7 @@ func TestKubeletConfigDefaults(t *testing.T) { } } - cs = createContainerService("testcluster", "1.8.6", 3, 2) + cs = CreateMockContainerService("testcluster", "1.8.6", 3, 2, false) // TODO test all default overrides overrideVal := "/etc/override" cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig = map[string]string{ @@ -40,7 +40,7 @@ func TestKubeletConfigDefaults(t *testing.T) { func 
TestKubeletConfigUseCloudControllerManager(t *testing.T) { // Test UseCloudControllerManager = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(true) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -50,7 +50,7 @@ func TestKubeletConfigUseCloudControllerManager(t *testing.T) { } // Test UseCloudControllerManager = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(false) setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -63,7 +63,7 @@ func TestKubeletConfigUseCloudControllerManager(t *testing.T) { func TestKubeletConfigCloudConfig(t *testing.T) { // Test default value and custom value for --cloud-config - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig if k["--cloud-config"] != "/etc/kubernetes/azure.json" { @@ -71,7 +71,7 @@ func TestKubeletConfigCloudConfig(t *testing.T) { k["--cloud-config"]) } - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig["--cloud-config"] = "custom.json" setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -83,7 +83,7 @@ func TestKubeletConfigCloudConfig(t *testing.T) { func TestKubeletConfigAzureContainerRegistryCofig(t *testing.T) { // Test default value and custom value for --azure-container-registry-config - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig if k["--azure-container-registry-config"] != "/etc/kubernetes/azure.json" { @@ -91,7 +91,7 @@ func TestKubeletConfigAzureContainerRegistryCofig(t *testing.T) { k["--azure-container-registry-config"]) } - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig["--azure-container-registry-config"] = "custom.json" setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -103,7 +103,7 @@ func TestKubeletConfigAzureContainerRegistryCofig(t *testing.T) { func TestKubeletConfigNetworkPlugin(t *testing.T) { // Test NetworkPlugin = "kubenet" - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -113,7 +113,7 @@ func TestKubeletConfigNetworkPlugin(t *testing.T) { } // Test NetworkPlugin = "azure" - cs = 
createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginAzure setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -126,7 +126,7 @@ func TestKubeletConfigNetworkPlugin(t *testing.T) { func TestKubeletConfigEnableSecureKubelet(t *testing.T) { // Test EnableSecureKubelet = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(true) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -144,7 +144,7 @@ func TestKubeletConfigEnableSecureKubelet(t *testing.T) { } // Test EnableSecureKubelet = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(false) setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -158,7 +158,7 @@ func TestKubeletConfigEnableSecureKubelet(t *testing.T) { } func TestKubeletMaxPods(t *testing.T) { - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginAzure setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -167,7 +167,7 @@ func TestKubeletMaxPods(t *testing.T) { NetworkPluginAzure, k["--max-pods"]) } - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -178,7 +178,7 @@ func TestKubeletMaxPods(t *testing.T) { } func TestKubeletCalico(t *testing.T) { - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPolicy = NetworkPolicyCalico setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig diff --git a/pkg/acsengine/defaults-scheduler_test.go b/pkg/acsengine/defaults-scheduler_test.go index bae224265d..e350ba95dd 100644 --- a/pkg/acsengine/defaults-scheduler_test.go +++ b/pkg/acsengine/defaults-scheduler_test.go @@ -5,7 +5,7 @@ import ( ) func TestSchedulerDefaultConfig(t *testing.T) { - cs := createContainerService("testcluster", "1.9.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.9.6", 3, 2, false) setSchedulerConfig(cs) s := cs.Properties.OrchestratorProfile.KubernetesConfig.SchedulerConfig for key, val := range staticSchedulerConfig { @@ -23,7 +23,7 @@ func TestSchedulerDefaultConfig(t *testing.T) { } func TestSchedulerUserConfig(t *testing.T) { - cs := createContainerService("testcluster", "1.9.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.9.6", 3, 2, false) assignmentMap := map[string]string{ "--scheduler-name": "my-custom-name", 
"--feature-gates": "APIListChunking=true,APIResponseCompression=true,Accelerators=true,AdvancedAuditing=true", @@ -39,7 +39,7 @@ func TestSchedulerUserConfig(t *testing.T) { } func TestSchedulerStaticConfig(t *testing.T) { - cs := createContainerService("testcluster", "1.9.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.9.6", 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.SchedulerConfig = map[string]string{ "--kubeconfig": "user-override", "--leader-elect": "user-override", diff --git a/pkg/acsengine/mocks.go b/pkg/acsengine/mocks.go new file mode 100644 index 0000000000..8831773deb --- /dev/null +++ b/pkg/acsengine/mocks.go @@ -0,0 +1,90 @@ +package acsengine + +import ( + "github.com/Azure/acs-engine/pkg/api" + "github.com/Azure/acs-engine/pkg/helpers" + "github.com/satori/go.uuid" +) + +// CreateMockContainerService returns a mock container service for testing purposes +func CreateMockContainerService(containerServiceName, orchestratorVersion string, masterCount, agentCount int, certs bool) *api.ContainerService { + cs := api.ContainerService{} + cs.ID = uuid.NewV4().String() + cs.Location = "eastus" + cs.Name = containerServiceName + + cs.Properties = &api.Properties{} + + cs.Properties.MasterProfile = &api.MasterProfile{} + cs.Properties.MasterProfile.Count = masterCount + cs.Properties.MasterProfile.DNSPrefix = "testmaster" + cs.Properties.MasterProfile.VMSize = "Standard_D2_v2" + + cs.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{} + agentPool := &api.AgentPoolProfile{} + agentPool.Count = agentCount + agentPool.Name = "agentpool1" + agentPool.VMSize = "Standard_D2_v2" + agentPool.OSType = "Linux" + agentPool.AvailabilityProfile = "AvailabilitySet" + agentPool.StorageProfile = "StorageAccount" + + cs.Properties.AgentPoolProfiles = append(cs.Properties.AgentPoolProfiles, agentPool) + + cs.Properties.LinuxProfile = &api.LinuxProfile{ + AdminUsername: "azureuser", + SSH: struct { + PublicKeys []api.PublicKey `json:"publicKeys"` + }{}, + } + + cs.Properties.LinuxProfile.AdminUsername = "azureuser" + cs.Properties.LinuxProfile.SSH.PublicKeys = append( + cs.Properties.LinuxProfile.SSH.PublicKeys, api.PublicKey{KeyData: "test"}) + + cs.Properties.ServicePrincipalProfile = &api.ServicePrincipalProfile{} + cs.Properties.ServicePrincipalProfile.ClientID = "DEC923E3-1EF1-4745-9516-37906D56DEC4" + cs.Properties.ServicePrincipalProfile.Secret = "DEC923E3-1EF1-4745-9516-37906D56DEC4" + + cs.Properties.OrchestratorProfile = &api.OrchestratorProfile{} + cs.Properties.OrchestratorProfile.OrchestratorType = api.Kubernetes + cs.Properties.OrchestratorProfile.OrchestratorVersion = orchestratorVersion + cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{ + EnableSecureKubelet: helpers.PointerToBool(api.DefaultSecureKubeletEnabled), + EnableRbac: helpers.PointerToBool(api.DefaultRBACEnabled), + EtcdDiskSizeGB: DefaultEtcdDiskSize, + ServiceCIDR: DefaultKubernetesServiceCIDR, + DockerBridgeSubnet: DefaultDockerBridgeSubnet, + DNSServiceIP: DefaultKubernetesDNSServiceIP, + GCLowThreshold: DefaultKubernetesGCLowThreshold, + GCHighThreshold: DefaultKubernetesGCHighThreshold, + MaxPods: DefaultKubernetesMaxPodsVNETIntegrated, + ClusterSubnet: DefaultKubernetesSubnet, + ContainerRuntime: DefaultContainerRuntime, + NetworkPlugin: DefaultNetworkPlugin, + NetworkPolicy: DefaultNetworkPolicy, + EtcdVersion: DefaultEtcdVersion, + KubeletConfig: make(map[string]string), + } + + cs.Properties.CertificateProfile = 
&api.CertificateProfile{} + if certs { + cs.Properties.CertificateProfile.CaCertificate = "cacert" + cs.Properties.CertificateProfile.CaPrivateKey = "cakey" + cs.Properties.CertificateProfile.KubeConfigCertificate = "kubeconfigcert" + cs.Properties.CertificateProfile.KubeConfigPrivateKey = "kubeconfigkey" + cs.Properties.CertificateProfile.APIServerCertificate = "apiservercert" + cs.Properties.CertificateProfile.APIServerPrivateKey = "apiserverkey" + cs.Properties.CertificateProfile.ClientCertificate = "clientcert" + cs.Properties.CertificateProfile.ClientPrivateKey = "clientkey" + cs.Properties.CertificateProfile.EtcdServerCertificate = "etcdservercert" + cs.Properties.CertificateProfile.EtcdServerPrivateKey = "etcdserverkey" + cs.Properties.CertificateProfile.EtcdClientCertificate = "etcdclientcert" + cs.Properties.CertificateProfile.EtcdClientPrivateKey = "etcdclientkey" + cs.Properties.CertificateProfile.EtcdPeerCertificates = []string{"etcdpeercert1", "etcdpeercert2", "etcdpeercert3", "etcdpeercert4", "etcdpeercert5"} + cs.Properties.CertificateProfile.EtcdPeerPrivateKeys = []string{"etcdpeerkey1", "etcdpeerkey2", "etcdpeerkey3", "etcdpeerkey4", "etcdpeerkey5"} + + } + + return &cs +} diff --git a/pkg/acsengine/output.go b/pkg/acsengine/output.go index dc7e0373a6..4df91cbf76 100644 --- a/pkg/acsengine/output.go +++ b/pkg/acsengine/output.go @@ -114,6 +114,9 @@ func (w *ArtifactWriter) WriteTLSArtifacts(containerService *api.ContainerServic return e } for i := 0; i < properties.MasterProfile.Count; i++ { + if len(properties.CertificateProfile.EtcdPeerPrivateKeys) <= i || len(properties.CertificateProfile.EtcdPeerCertificates) <= i { + return fmt.Errorf("missing etcd peer certificate/key pair") + } k := "etcdpeer" + strconv.Itoa(i) + ".key" if e := f.SaveFileString(artifactsDir, k, properties.CertificateProfile.EtcdPeerPrivateKeys[i]); e != nil { return e diff --git a/pkg/acsengine/output_test.go b/pkg/acsengine/output_test.go new file mode 100644 index 0000000000..29701ed508 --- /dev/null +++ b/pkg/acsengine/output_test.go @@ -0,0 +1,83 @@ +package acsengine + +import ( + "fmt" + "os" + "path" + "testing" + + "github.com/Azure/acs-engine/pkg/i18n" +) + +func TestWriteTLSArtifacts(t *testing.T) { + + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 1, 2, true) + writer := &ArtifactWriter{ + Translator: &i18n.Translator{ + Locale: nil, + }, + } + dir := "_testoutputdir" + defaultDir := fmt.Sprintf("%s-%s", cs.Properties.OrchestratorProfile.OrchestratorType, GenerateClusterID(cs.Properties)) + defaultDir = path.Join("_output", defaultDir) + defer os.RemoveAll(dir) + defer os.RemoveAll(defaultDir) + + // Generate apimodel and azure deploy artifacts without certs + err := writer.WriteTLSArtifacts(cs, "vlabs", "fake template", "fake parameters", dir, false, false) + + if err != nil { + t.Fatalf("unexpected error trying to write TLS artifacts: %s", err.Error()) + } + + expectedFiles := []string{"apimodel.json", "azuredeploy.json", "azuredeploy.parameters.json"} + + for _, f := range expectedFiles { + if _, err := os.Stat(dir + "/" + f); os.IsNotExist(err) { + t.Fatalf("expected file %s/%s to be generated by WriteTLSArtifacts", dir, f) + } + } + + os.RemoveAll(dir) + + // Generate parameters only and certs + err = writer.WriteTLSArtifacts(cs, "vlabs", "fake template", "fake parameters", "", true, true) + if err != nil { + t.Fatalf("unexpected error trying to write TLS artifacts: %s", err.Error()) + } + + if _, err := os.Stat(defaultDir + "/apimodel.json"); 
!os.IsNotExist(err) { + t.Fatalf("expected file %s/apimodel.json not to be generated by WriteTLSArtifacts with parametersOnly set to true", defaultDir) + } + + if _, err := os.Stat(defaultDir + "/azuredeploy.json"); !os.IsNotExist(err) { + t.Fatalf("expected file %s/azuredeploy.json not to be generated by WriteTLSArtifacts with parametersOnly set to true", defaultDir) + } + + expectedFiles = []string{"azuredeploy.parameters.json", "ca.crt", "ca.key", "apiserver.crt", "apiserver.key", "client.crt", "client.key", "etcdclient.key", "etcdclient.crt", "etcdserver.crt", "etcdserver.key", "etcdpeer0.crt", "etcdpeer0.key", "kubectlClient.crt", "kubectlClient.key"} + + for _, f := range expectedFiles { + if _, err := os.Stat(defaultDir + "/" + f); os.IsNotExist(err) { + t.Fatalf("expected file %s/%s to be generated by WriteTLSArtifacts", dir, f) + } + } + + kubeDir := path.Join(defaultDir, "kubeconfig") + if _, err := os.Stat(kubeDir + "/" + "kubeconfig.eastus.json"); os.IsNotExist(err) { + t.Fatalf("expected file %s/kubeconfig/kubeconfig.eastus.json to be generated by WriteTLSArtifacts", defaultDir) + } + os.RemoveAll(defaultDir) + + // Generate certs with all kubeconfig locations + cs.Location = "" + err = writer.WriteTLSArtifacts(cs, "vlabs", "fake template", "fake parameters", "", true, false) + if err != nil { + t.Fatalf("unexpected error trying to write TLS artifacts: %s", err.Error()) + } + + for _, region := range AzureLocations { + if _, err := os.Stat(kubeDir + "/" + "kubeconfig." + region + ".json"); os.IsNotExist(err) { + t.Fatalf("expected kubeconfig for region %s to be generated by WriteTLSArtifacts", region) + } + } +} diff --git a/pkg/operations/kubernetesupgrade/upgradecluster_test.go b/pkg/operations/kubernetesupgrade/upgradecluster_test.go index 26f054a7d0..427462b8cd 100644 --- a/pkg/operations/kubernetesupgrade/upgradecluster_test.go +++ b/pkg/operations/kubernetesupgrade/upgradecluster_test.go @@ -4,6 +4,7 @@ import ( "os" "testing" + "github.com/Azure/acs-engine/pkg/acsengine" "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/acs-engine/pkg/armhelpers" "github.com/Azure/acs-engine/pkg/i18n" @@ -28,7 +29,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to list VMs during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 1, 1) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 1, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" @@ -51,8 +52,8 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { os.RemoveAll("./translations") }) - It("Should return error message when failing to detete VMs during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 1, 1) + It("Should return error message when failing to delete VMs during upgrade operation", func() { + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 1, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ @@ -72,7 +73,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to deploy template during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.13", 1, 1) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.13", 1, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.6.13" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ 
-91,7 +92,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to get a virtual machine during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 1, 6) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 1, 6, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -110,7 +111,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to get storage client during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 5, 1) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 5, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -129,7 +130,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to delete network interface during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 2) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 2, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -148,7 +149,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing on ClusterPreflightCheck operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 3) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 3, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.8.6" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -166,7 +167,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to delete role assignment during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 2) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 2, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{} cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity = true @@ -188,7 +189,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should not fail if no managed identity is returned by azure during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 2) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 2, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{} cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity = true @@ -206,54 +207,3 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { Expect(err).To(BeNil()) }) }) - -func createContainerService(containerServiceName string, orchestratorVersion string, masterCount int, agentCount int) *api.ContainerService { - cs := api.ContainerService{} - cs.ID = uuid.NewV4().String() - cs.Location = "eastus" - cs.Name = containerServiceName - - cs.Properties = &api.Properties{} - - cs.Properties.MasterProfile = &api.MasterProfile{} - cs.Properties.MasterProfile.Count = masterCount - cs.Properties.MasterProfile.DNSPrefix = "testmaster" - cs.Properties.MasterProfile.VMSize = "Standard_D2_v2" - - 
cs.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{} - agentPool := &api.AgentPoolProfile{} - agentPool.Count = agentCount - agentPool.Name = "agentpool1" - agentPool.VMSize = "Standard_D2_v2" - agentPool.OSType = "Linux" - agentPool.AvailabilityProfile = "AvailabilitySet" - agentPool.StorageProfile = "StorageAccount" - - cs.Properties.AgentPoolProfiles = append(cs.Properties.AgentPoolProfiles, agentPool) - - cs.Properties.LinuxProfile = &api.LinuxProfile{ - AdminUsername: "azureuser", - SSH: struct { - PublicKeys []api.PublicKey `json:"publicKeys"` - }{}, - } - - cs.Properties.LinuxProfile.AdminUsername = "azureuser" - cs.Properties.LinuxProfile.SSH.PublicKeys = append( - cs.Properties.LinuxProfile.SSH.PublicKeys, api.PublicKey{KeyData: "test"}) - - cs.Properties.ServicePrincipalProfile = &api.ServicePrincipalProfile{} - cs.Properties.ServicePrincipalProfile.ClientID = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - cs.Properties.ServicePrincipalProfile.Secret = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - - cs.Properties.OrchestratorProfile = &api.OrchestratorProfile{} - cs.Properties.OrchestratorProfile.OrchestratorType = api.Kubernetes - cs.Properties.OrchestratorProfile.OrchestratorVersion = orchestratorVersion - - cs.Properties.CertificateProfile = &api.CertificateProfile{} - cs.Properties.CertificateProfile.CaCertificate = "cacert" - cs.Properties.CertificateProfile.KubeConfigCertificate = "kubeconfigcert" - cs.Properties.CertificateProfile.KubeConfigPrivateKey = "kubeconfigkey" - - return &cs -} From bd2a28574f66c1f142b4b3a1e88137698397f134 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 5 Jul 2018 13:19:24 -0700 Subject: [PATCH 57/74] long-term DNS livenessProbe tests for soak cluster (#3424) --- test/e2e/kubernetes/kubernetes_test.go | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 945330101d..6b56855423 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -66,9 +66,18 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu It("should have functional DNS", func() { if !eng.HasWindowsAgents() { if !eng.HasNetworkPolicy("calico") { - pod, err := pod.CreatePodFromFile(filepath.Join(WorkloadDir, "dns-liveness.yaml"), "dns-liveness", "default") - Expect(err).NotTo(HaveOccurred()) - running, err := pod.WaitOnReady(5*time.Second, 2*time.Minute) + var err error + var p *pod.Pod + p, err = pod.CreatePodFromFile(filepath.Join(WorkloadDir, "dns-liveness.yaml"), "dns-liveness", "default") + if cfg.SoakClusterName == "" { + Expect(err).NotTo(HaveOccurred()) + } else { + if err != nil { + p, err = pod.Get("dns-liveness", "default") + Expect(err).NotTo(HaveOccurred()) + } + } + running, err := p.WaitOnReady(5*time.Second, 2*time.Minute) Expect(err).NotTo(HaveOccurred()) Expect(running).To(Equal(true)) } @@ -656,9 +665,13 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu Expect(err).NotTo(HaveOccurred()) Expect(running).To(Equal(true)) restarts := pod.Status.ContainerStatuses[0].RestartCount - err = pod.Delete() - Expect(err).NotTo(HaveOccurred()) - Expect(restarts).To(Equal(0)) + if cfg.SoakClusterName == "" { + err = pod.Delete() + Expect(err).NotTo(HaveOccurred()) + Expect(restarts).To(Equal(0)) + } else { + log.Printf("%d DNS livenessProbe restarts since this cluster was created...\n", restarts) + } } }) }) From 0acbf7c4c9a74f60888862e679298fd62e875526 
Mon Sep 17 00:00:00 2001 From: Lachlan Evenson Date: Thu, 5 Jul 2018 13:32:42 -0700 Subject: [PATCH 58/74] Remove DenyEscalatingExec admission controller from default list (#3423) --- docs/clusterdefinition.md | 4 ++-- pkg/acsengine/defaults-apiserver.go | 4 ++-- pkg/acsengine/defaults-apiserver_test.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 040ea7136e..6b4e339eaf 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -340,8 +340,8 @@ Below is a list of apiserver options that acs-engine will configure by default: | apiserver option | default value | | ------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| "--admission-control" | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions prior to 1.9.0 | -| "--enable-admission-plugins"`*` | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions 1.9.0 and later | +| "--admission-control" | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,AlwaysPullImages" (Kubernetes versions prior to 1.9.0 | +| "--enable-admission-plugins"`*` | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages" (Kubernetes versions 1.9.0 and later | | "--authorization-mode" | "Node", "RBAC" (_the latter if enabledRbac is true_) | | "--audit-log-maxage" | "30" | | "--audit-log-maxbackup" | "10" | diff --git a/pkg/acsengine/defaults-apiserver.go b/pkg/acsengine/defaults-apiserver.go index b62e4d05b6..28098db3f4 100644 --- a/pkg/acsengine/defaults-apiserver.go +++ b/pkg/acsengine/defaults-apiserver.go @@ -143,9 +143,9 @@ func getDefaultAdmissionControls(cs *api.ContainerService) (string, string) { // Add new version case when applying admission controllers only available in that version or later switch { case common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.9.0"): - admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages,ExtendedResourceToleration" + admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages,ExtendedResourceToleration" default: - admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" + admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,AlwaysPullImages" } // Pod Security Policy configuration diff --git a/pkg/acsengine/defaults-apiserver_test.go b/pkg/acsengine/defaults-apiserver_test.go index 3e69c4dfee..447b21f86a 100644 --- a/pkg/acsengine/defaults-apiserver_test.go +++ b/pkg/acsengine/defaults-apiserver_test.go @@ -321,7 +321,7 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { 
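	// DenyEscalatingExec is intentionally absent from the admission plugin
	// list configured below, mirroring its removal from the acs-engine
	// defaults for both "--admission-control" (pre-1.9) and
	// "--enable-admission-plugins" (1.9 and later).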
admissonControlKey := "--admission-control" cs := CreateMockContainerService("testcluster", version, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig = map[string]string{} - cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig[admissonControlKey] = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages,ExtendedResourceToleration" + cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig[admissonControlKey] = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages,ExtendedResourceToleration" setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig From 17b558a92990a9f9e03d3c6bba95e0002789703e Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 5 Jul 2018 14:38:09 -0700 Subject: [PATCH 59/74] E2E cleanup: "acse-test-infrastructure" is too long (#3425) --- test/e2e/cleanup.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/e2e/cleanup.sh b/test/e2e/cleanup.sh index 8d875f02be..1409d9e7ab 100755 --- a/test/e2e/cleanup.sh +++ b/test/e2e/cleanup.sh @@ -26,8 +26,6 @@ if [ -z "$EXPIRATION_IN_HOURS" ]; then EXPIRATION_IN_HOURS=2 fi -set -eu -o pipefail - az login --service-principal \ --username "${SERVICE_PRINCIPAL_CLIENT_ID}" \ --password "${SERVICE_PRINCIPAL_CLIENT_SECRET}" \ @@ -42,7 +40,7 @@ az account set -s $SUBSCRIPTION_ID_TO_CLEANUP (( deadline=$(date +%s)-${expirationInSecs%.*} )) # find resource groups created before our deadline echo "Looking for resource groups created over ${EXPIRATION_IN_HOURS} hours ago..." -for resourceGroup in `az group list | jq --arg dl $deadline '.[] | select(.id | contains("acse-test-infrastructure") | not) | select(.tags.now < $dl).name' | tr -d '\"' || ""`; do +for resourceGroup in `az group list | jq --arg dl $deadline '.[] | select(.name | startswith("acse-") | not) | select(.tags.now < $dl).name' | tr -d '\"' || ""`; do for deployment in `az group deployment list -g $resourceGroup | jq '.[] | .name' | tr -d '\"' || ""`; do echo "Will delete deployment ${deployment} from resource group ${resourceGroup}..." az group deployment delete -n $deployment -g $resourceGroup || echo "unable to delete deployment ${deployment}, will continue..." From 5e9e37701396faffca7e4a84d6f3ec5a5345abeb Mon Sep 17 00:00:00 2001 From: Lachlan Evenson Date: Thu, 5 Jul 2018 15:10:45 -0700 Subject: [PATCH 60/74] pin device plugin ds to only labelled nodes. 
Update device plugin version for 1.11 (#3422) --- .../kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml | 1 + pkg/acsengine/k8s_versions.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml index 63fa6ac52c..a67b6c92f0 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml @@ -53,3 +53,4 @@ spec: path: /var/lib/kubelet/device-plugins nodeSelector: beta.kubernetes.io/os: linux + accelerator: nvidia diff --git a/pkg/acsengine/k8s_versions.go b/pkg/acsengine/k8s_versions.go index 279321d414..57d0fc9c62 100644 --- a/pkg/acsengine/k8s_versions.go +++ b/pkg/acsengine/k8s_versions.go @@ -25,7 +25,7 @@ var k8sComponentVersions = map[string]map[string]string{ ContainerMonitoringAddonName: "oms:ciprod05082018", AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4", "cluster-autoscaler": "cluster-autoscaler:v1.3.0", - NVIDIADevicePluginAddonName: "k8s-device-plugin:1.10", + NVIDIADevicePluginAddonName: "k8s-device-plugin:1.11", "nodestatusfreq": DefaultKubernetesNodeStatusUpdateFrequency, "nodegraceperiod": DefaultKubernetesCtrlMgrNodeMonitorGracePeriod, "podeviction": DefaultKubernetesCtrlMgrPodEvictionTimeout, From e6bf885daa80a244767f67b843f22207fb165e63 Mon Sep 17 00:00:00 2001 From: Patrick Lang Date: Thu, 5 Jul 2018 16:58:46 -0700 Subject: [PATCH 61/74] Starting Windows troubleshooting steps (#3431) --- docs/kubernetes/troubleshooting.md | 47 +++++++++++++++++++++++++++++- docs/kubernetes/windows.md | 10 +++---- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/docs/kubernetes/troubleshooting.md b/docs/kubernetes/troubleshooting.md index d09a5e08d2..f0b582966b 100644 --- a/docs/kubernetes/troubleshooting.md +++ b/docs/kubernetes/troubleshooting.md @@ -17,7 +17,7 @@ CSE stands for CustomScriptExtension, and is just a way of expressing: "a script To summarize, the way that acs-engine implements Kubernetes on Azure is a collection of (1) Azure VM configuration + (2) shell script execution. Both are implemented as a single operational unit, and when #2 fails, we consider the entire VM provisioning operation to be a failure; more importantly, if only one VM in the cluster deployment fails, we consider the entire cluster operation to be a failure. -### How To Debug CSE errors +### How To Debug CSE errors (Linux) In order to troubleshoot a cluster that failed in the above way(s), we need to grab the CSE logs from the host VM itself. @@ -57,6 +57,51 @@ If after following the above you are still unable to troubleshoot your deploymen 3. The content of `/var/log/azure/cluster-provision.log` and `/var/log/cloud-init-output.log` +### How To Debug CSE Errors (Windows) + +There are two symptoms where you may need to debug Custom Script Extension errors on Windows: + +- VMExtensionProvisioningError or VMExtensionProvisioningTimeout +- `kubectl node` doesn't list the Windows node(s) + +To get more logs, you need to connect to the Windows nodes using Remote Desktop. Since the nodes are on a private IP range, you will need to use SSH local port forwarding from a master node. + +1. 
Get the IP of the Windows node with `az vm list` and `az vm show` + + ``` + $ az vm list --resource-group group1 -o table + Name ResourceGroup Location + ------------------------ --------------- ---------- + 29442k8s9000 group1 westus2 + 29442k8s9001 group1 westus2 + k8s-linuxpool-29442807-0 group1 westus2 + k8s-linuxpool-29442807-1 group1 westus2 + k8s-master-29442807-0 group1 westus2 + + $ az vm show -g group1 -n 29442k8s9000 --show-details --query 'privateIps' + "10.240.0.4" + ``` + +2. Forward a local port to the Windows port 3389, such as `ssh -L 5500:10.240.0.4:3389 ..cloudapp.azure.com` +3. Run `mstsc.exe /v:localhost:5500` + +Once connected, check the following logs for errors: + + - `c:\Azure\CustomDataSetupScript.log` + + +## Windows kubelet & CNI errors + +If the node is not showing up in `kubectl get node` or fails to schedule pods, check for failures from the kubelet and CNI logs. + +Follow the same steps [above](#how-to-debug-cse-errors-windows) to connect to Remote Desktop to the node, then look for errors in these logs: + + - `c:\k\kubelet.log` + - `c:\k\kubelet.err.log` + - `c:\k\azure-vnet*.log` + + + # Misconfigured Service Principal If your Service Principal is misconfigured, none of the Kubernetes components will come up in a healthy manner. diff --git a/docs/kubernetes/windows.md b/docs/kubernetes/windows.md index 5d3a38b13b..82ca710c98 100644 --- a/docs/kubernetes/windows.md +++ b/docs/kubernetes/windows.md @@ -302,6 +302,10 @@ TODO Windows support is still in active development with many changes each week. Read on for more info on known per-version issues and troubleshooting if you run into problems. +### Finding logs + +To connect to a Windows node using Remote Desktop and get logs, please read over this topic in the main [troubleshooting](troubleshooting.md#how-to-debug-cse-errors-windows) page first. + ### Checking versions Please be sure to include this info with any Windows bug reports. @@ -313,12 +317,6 @@ Kubernetes - “kernel version” - Also note the IP Address for the next step, but you don't need to share it -Windows config -Connect to the Windows node with remote desktop. This is easiest forwarding a port through SSH from your Kubernetes management endpoint. - -1. `ssh -L 5500::3389 user@masterFQDN` -2. Once connected, run `mstsc.exe /v:localhost:5500` to connect. Log in with the username & password you set for the Windows agents. - The Azure CNI plugin version and configuration is stored in `C:\k\azurecni\netconf\10-azure.conflist`. 
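For example, from a Remote Desktop session on the node, something like the following prints the lines containing the `mode` and `dns.Nameservers` values called out just below (a sketch; the exact key names can vary between Azure CNI versions):

```
type C:\k\azurecni\netconf\10-azure.conflist | findstr /i "mode nameservers"
```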
Get - mode - dns.Nameservers From aadc1d41979acc52c78accf81f911ea1a812addb Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Thu, 5 Jul 2018 17:13:13 -0700 Subject: [PATCH 62/74] Kubernetes E2E: optimized HPA test (#3428) --- test/e2e/kubernetes/deployment/deployment.go | 37 ++++++++++++++++++++ test/e2e/kubernetes/kubernetes_test.go | 8 +---- 2 files changed, 38 insertions(+), 7 deletions(-) diff --git a/test/e2e/kubernetes/deployment/deployment.go b/test/e2e/kubernetes/deployment/deployment.go index 2b7439972d..87bd7fb6d8 100644 --- a/test/e2e/kubernetes/deployment/deployment.go +++ b/test/e2e/kubernetes/deployment/deployment.go @@ -1,6 +1,7 @@ package deployment import ( + "context" "encoding/json" "fmt" "log" @@ -179,3 +180,39 @@ func (d *Deployment) CreateDeploymentHPA(cpuPercent, min, max int) error { func (d *Deployment) Pods() ([]pod.Pod, error) { return pod.GetAllByPrefix(d.Metadata.Name, d.Metadata.Namespace) } + +// WaitForReplicas waits for a minimum of n pod replicas +func (d *Deployment) WaitForReplicas(n int, sleep, duration time.Duration) ([]pod.Pod, error) { + readyCh := make(chan bool, 1) + errCh := make(chan error) + ctx, cancel := context.WithTimeout(context.Background(), duration) + var pods []pod.Pod + defer cancel() + go func() { + for { + select { + case <-ctx.Done(): + errCh <- fmt.Errorf("Timeout exceeded (%s) while waiting for %d Pod replicas from Deployment %s", duration.String(), n, d.Metadata.Name) + default: + pods, err := pod.GetAllByPrefix(d.Metadata.Name, d.Metadata.Namespace) + if err != nil { + errCh <- err + return + } + if len(pods) >= n { + readyCh <- true + } else { + time.Sleep(sleep) + } + } + } + }() + for { + select { + case err := <-errCh: + return pods, err + case _ = <-readyCh: + return pods, nil + } + } +} diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 6b56855423..4e0738294f 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -553,15 +553,9 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu Expect(err).NotTo(HaveOccurred()) Expect(len(loadTestPods)).To(Equal(numLoadTestPods)) - By("Waiting 3 minutes for load to take effect") - // Wait 3 minutes for autoscaler to respond to load - time.Sleep(3 * time.Minute) - By("Ensuring we have more than 1 apache-php pods due to hpa enforcement") - phpPods, err = phpApacheDeploy.Pods() + _, err = phpApacheDeploy.WaitForReplicas(2, 5*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) - // We should have > 1 pods after autoscale effects - Expect(len(phpPods) > 1).To(BeTrue()) By("Cleaning up after ourselves") err = loadTestDeploy.Delete() From d0f9d06ec741cf691bcb3ba3a2fbf814997b1df2 Mon Sep 17 00:00:00 2001 From: Yongli Chen Date: Thu, 5 Jul 2018 17:23:33 -0700 Subject: [PATCH 63/74] update azure-npm to v0.0.4 (#3426) --- .../k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml index 06ba15756d..300cec8b96 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml @@ -75,7 +75,7 @@ spec: beta.kubernetes.io/os: linux containers: - name: azure-npm - image: containernetworking/azure-npm:v0.0.3 + image: containernetworking/azure-npm:v0.0.4 securityContext: privileged: true env: From 
d4567131d1e71917e3867cedd8f56c5a55ff7687 Mon Sep 17 00:00:00 2001 From: Rita Zhang Date: Thu, 5 Jul 2018 17:30:07 -0700 Subject: [PATCH 64/74] Fix KMS in multi api-server (#3430) --- parts/k8s/kubernetesmasterresources.t | 9 +++++++++ parts/k8s/kubernetesmastervars.t | 5 ++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/parts/k8s/kubernetesmasterresources.t b/parts/k8s/kubernetesmasterresources.t index db7dfaf757..9c8aedb63c 100644 --- a/parts/k8s/kubernetesmasterresources.t +++ b/parts/k8s/kubernetesmasterresources.t @@ -622,6 +622,15 @@ }, {{end}} {{if EnableEncryptionWithExternalKms}} + { + "type": "Microsoft.Storage/storageAccounts", + "name": "[variables('clusterKeyVaultName')]", + "apiVersion": "[variables('apiVersionStorage')]", + "location": "[variables('location')]", + "properties": { + "accountType": "Standard_LRS" + } + }, { "type": "Microsoft.KeyVault/vaults", "name": "[variables('clusterKeyVaultName')]", diff --git a/parts/k8s/kubernetesmastervars.t b/parts/k8s/kubernetesmastervars.t index e24b60105f..0bc6507903 100644 --- a/parts/k8s/kubernetesmastervars.t +++ b/parts/k8s/kubernetesmastervars.t @@ -486,6 +486,9 @@ {{end}} {{if EnableEncryptionWithExternalKms}} ,"apiVersionKeyVault": "2016-10-01", - "clusterKeyVaultName": "[take(concat(resourceGroup().location, '-' , uniqueString(concat(variables('masterFqdnPrefix'),'-',resourceGroup().location))), 20)]", + {{if not .HasStorageAccountDisks}} + "apiVersionStorage": "2015-06-15", + {{end}} + "clusterKeyVaultName": "[take(concat(variables('masterFqdnPrefix'), tolower(uniqueString(variables('masterFqdnPrefix')))), 20)]", "clusterKeyVaultSku" : "[parameters('clusterKeyVaultSku')]" {{end}} From 3562dd29efdb57e22f3b44b6b6ce63dc5f8f21ce Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 6 Jul 2018 09:05:18 -0700 Subject: [PATCH 65/74] Kubernetes: install blobfuse during CSE (#3401) --- parts/k8s/kubernetescustomscript.sh | 8 ++++++-- parts/k8s/kubernetesprovisionsource.sh | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index a44cd313cf..46eda03d86 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -24,6 +24,8 @@ ERR_K8S_RUNNING_TIMEOUT=30 # Timeout waiting for k8s cluster to be healthy ERR_K8S_DOWNLOAD_TIMEOUT=31 # Timeout waiting for Kubernetes download(s) ERR_KUBECTL_NOT_FOUND=32 # kubectl client binary not found on local disk ERR_CNI_DOWNLOAD_TIMEOUT=41 # Timeout waiting for CNI download(s) +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 # Timeout waiting for https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 # Failed to add repo pkg file ERR_OUTBOUND_CONN_FAIL=50 # Unable to establish outbound connection ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 # Unable to configure custom search domains ERR_APT_DAILY_TIMEOUT=98 # Timeout waiting for apt daily updates @@ -154,13 +156,15 @@ function installEtcd() { } function installDeps() { + retrycmd_if_failure_no_stats 20 1 5 curl -fsSL https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 3 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL echo `date`,`hostname`, apt-get_update_begin>>/opt/m - apt_get_update || exit $ERR_APT_INSTALL_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT echo `date`,`hostname`, apt-get_update_end>>/opt/m # 
make sure walinuxagent doesn't get updated in the middle of running this script retrycmd_if_failure 20 5 30 apt-mark hold walinuxagent || exit $ERR_HOLD_WALINUXAGENT # See https://github.com/kubernetes/kubernetes/blob/master/build/debian-hyperkube-base/Dockerfile#L25-L44 - apt_get_install 20 30 300 apt-transport-https ca-certificates iptables iproute2 ebtables socat util-linux mount ethtool init-system-helpers nfs-common ceph-common conntrack glusterfs-client ipset jq cgroup-lite git pigz xz-utils || exit $ERR_APT_INSTALL_TIMEOUT + apt_get_install 20 30 300 apt-transport-https ca-certificates iptables iproute2 ebtables socat util-linux mount ethtool init-system-helpers nfs-common ceph-common conntrack glusterfs-client ipset jq cgroup-lite git pigz xz-utils blobfuse fuse || exit $ERR_APT_INSTALL_TIMEOUT systemctlEnableAndStart rpcbind systemctlEnableAndStart rpc-statd } diff --git a/parts/k8s/kubernetesprovisionsource.sh b/parts/k8s/kubernetesprovisionsource.sh index 58083ab71e..e722acdb3a 100644 --- a/parts/k8s/kubernetesprovisionsource.sh +++ b/parts/k8s/kubernetesprovisionsource.sh @@ -58,6 +58,7 @@ apt_get_update() { apt_update_output=/tmp/apt-get-update.out for i in $(seq 1 $retries); do timeout 30 dpkg --configure -a + timeout 30 apt-get -f -y install timeout 120 apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$" [ $? -ne 0 ] && cat $apt_update_output && break || \ cat $apt_update_output From a3906cb284af65211a78125c2fc6550b2fdf3118 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 6 Jul 2018 11:35:10 -0700 Subject: [PATCH 66/74] triage non-working KMS test implementation (#3437) --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 54c0636a51..23648aeee8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -188,7 +188,7 @@ jobs: echo 'export ORCHESTRATOR_RELEASE=1.10' >> $BASH_ENV echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/release/default/definition.json' >> $BASH_ENV echo 'export CREATE_VNET=true' >> $BASH_ENV - echo 'export ENABLE_KMS_ENCRYPTION=true' >> $BASH_ENV + echo 'export ENABLE_KMS_ENCRYPTION=false' >> $BASH_ENV echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV echo 'export CLEANUP_IF_FAIL=false' >> $BASH_ENV echo 'export RETAIN_SSH=false' >> $BASH_ENV @@ -216,7 +216,7 @@ jobs: echo 'export ORCHESTRATOR_RELEASE=1.11' >> $BASH_ENV echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/release/default/definition.json' >> $BASH_ENV echo 'export CREATE_VNET=true' >> $BASH_ENV - echo 'export ENABLE_KMS_ENCRYPTION=true' >> $BASH_ENV + echo 'export ENABLE_KMS_ENCRYPTION=false' >> $BASH_ENV echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV echo 'export RETAIN_SSH=false' >> $BASH_ENV echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV From 677ae560214b73112acdb3aece8fe8f3631b6696 Mon Sep 17 00:00:00 2001 From: Patrick Lang Date: Fri, 6 Jul 2018 11:39:27 -0700 Subject: [PATCH 67/74] Reenable azurefile test (#3402) * Reenable azurefile test * goimports * Skipping Azure file tests for v1.11+ for now * fix gofmt --- test/e2e/kubernetes/kubernetes_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 4e0738294f..7f721e0989 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -17,8 +17,10 @@ import ( 
"github.com/Azure/acs-engine/test/e2e/kubernetes/job" "github.com/Azure/acs-engine/test/e2e/kubernetes/networkpolicy" "github.com/Azure/acs-engine/test/e2e/kubernetes/node" + "github.com/Azure/acs-engine/test/e2e/kubernetes/persistentvolumeclaims" "github.com/Azure/acs-engine/test/e2e/kubernetes/pod" "github.com/Azure/acs-engine/test/e2e/kubernetes/service" + "github.com/Azure/acs-engine/test/e2e/kubernetes/storageclass" "github.com/Azure/acs-engine/test/e2e/kubernetes/util" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -808,9 +810,12 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } })*/ - /*It("should be able to attach azure file", func() { + It("should be able to attach azure file", func() { if eng.HasWindowsAgents() { - if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion, "1.8") { + if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion, "1.11") { + // Failure in 1.11+ - https://github.com/kubernetes/kubernetes/issues/65845 + Skip("Kubernetes 1.11 has a known issue creating Azure PersistentVolumeClaims") + } else if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion, "1.8") { storageclassName := "azurefile" // should be the same as in storageclass-azurefile.yaml sc, err := storageclass.CreateStorageClassFromFile(filepath.Join(WorkloadDir, "storageclass-azurefile.yaml"), storageclassName) Expect(err).NotTo(HaveOccurred()) @@ -844,6 +849,6 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } else { Skip("No windows agent was provisioned for this Cluster Definition") } - })*/ + }) }) }) From 6c532b835df2bcab7445c7a66182c1f9ab551256 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 6 Jul 2018 13:42:15 -0700 Subject: [PATCH 68/74] higher timeout, more retries for dpkg -i (#3441) --- parts/k8s/kubernetescustomscript.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index 46eda03d86..660cc0b826 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -157,7 +157,7 @@ function installEtcd() { function installDeps() { retrycmd_if_failure_no_stats 20 1 5 curl -fsSL https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT - retrycmd_if_failure 10 5 3 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL echo `date`,`hostname`, apt-get_update_begin>>/opt/m apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT echo `date`,`hostname`, apt-get_update_end>>/opt/m From 88f276fdd2c6d107087d049845342b99ad9d8669 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 6 Jul 2018 14:05:33 -0700 Subject: [PATCH 69/74] account for nil (#3442) --- test/e2e/engine/template.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/engine/template.go b/test/e2e/engine/template.go index e293c2d83c..bd9dd3c2db 100644 --- a/test/e2e/engine/template.go +++ b/test/e2e/engine/template.go @@ -227,7 +227,7 @@ func (e *Engine) HasGPUNodes() bool { func (e *Engine) HasAddon(name 
string) (bool, api.KubernetesAddon) { for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { if addon.Name == name { - return *addon.Enabled, addon + return helpers.IsTrueBoolPointer(addon.Enabled), addon } } return false, api.KubernetesAddon{} From 82d13c6b7eb55282dd10f31a0612a0440d1455c2 Mon Sep 17 00:00:00 2001 From: Jack Francis Date: Fri, 6 Jul 2018 15:13:12 -0700 Subject: [PATCH 70/74] Kubernetes: Azure CNI v1.0.7 (#3433) --- pkg/acsengine/defaults.go | 6 +++--- pkg/acsengine/defaults_test.go | 11 ++++++++--- pkg/api/const.go | 2 ++ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index d264e92e4c..890519cc62 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -21,8 +21,8 @@ import ( const ( // AzureCniPluginVer specifies version of Azure CNI plugin, which has been mirrored from // https://github.com/Azure/azure-container-networking/releases/download/${AZURE_PLUGIN_VER}/azure-vnet-cni-linux-amd64-${AZURE_PLUGIN_VER}.tgz - // to https://acs-mirror.azureedge.net/cni/ - AzureCniPluginVer = "v1.0.4" + // to https://acs-mirror.azureedge.net/cni + AzureCniPluginVer = "v1.0.7" // CNIPluginVer specifies the version of CNI implementation // https://github.com/containernetworking/plugins CNIPluginVer = "v0.7.1" @@ -591,7 +591,7 @@ func setOrchestratorDefaults(cs *api.ContainerService) { a.OrchestratorProfile.KubernetesConfig.Addons[cm] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[cm], DefaultContainerMonitoringAddonsConfig) } aN := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) - if a.OrchestratorProfile.KubernetesConfig.Addons[aN].IsEnabled(a.OrchestratorProfile.IsAzureCNI()) { + if a.OrchestratorProfile.KubernetesConfig.Addons[aN].IsEnabled(api.DefaultAzureCNINetworkMonitoringAddonEnabled) { a.OrchestratorProfile.KubernetesConfig.Addons[aN] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[aN], DefaultAzureCNINetworkMonitorAddonsConfig) } aNP := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, AzureNetworkPolicyAddonName) diff --git a/pkg/acsengine/defaults_test.go b/pkg/acsengine/defaults_test.go index f2578c6cc5..250c042f0a 100644 --- a/pkg/acsengine/defaults_test.go +++ b/pkg/acsengine/defaults_test.go @@ -565,23 +565,28 @@ func TestIsAzureCNINetworkmonitorAddon(t *testing.T) { properties := mockCS.Properties properties.OrchestratorProfile.OrchestratorType = "Kubernetes" properties.MasterProfile.Count = 1 + properties.OrchestratorProfile.KubernetesConfig.Addons = []api.KubernetesAddon{ + getMockAddon(AzureCNINetworkMonitoringAddonName), + } properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "azure" setOrchestratorDefaults(&mockCS) i := getAddonsIndexByName(properties.OrchestratorProfile.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) if !helpers.IsTrueBoolPointer(properties.OrchestratorProfile.KubernetesConfig.Addons[i].Enabled) { - t.Fatalf("Azure CNI network plugin configuration should add Azure CNI networkmonitor addon") + t.Fatalf("Azure CNI networkmonitor addon should be present") } + mockCS = getMockBaseContainerService("1.10.3") properties = mockCS.Properties properties.OrchestratorProfile.OrchestratorType = "Kubernetes" properties.MasterProfile.Count = 1 - properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "kubenet" + 
properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "azure" + properties.OrchestratorProfile.KubernetesConfig.Addons = []api.KubernetesAddon{} setOrchestratorDefaults(&mockCS) i = getAddonsIndexByName(properties.OrchestratorProfile.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) if helpers.IsTrueBoolPointer(properties.OrchestratorProfile.KubernetesConfig.Addons[i].Enabled) { - t.Fatalf("Azure CNI networkmonitor addon should only be present in Azure CNI configurations") + t.Fatalf("Azure CNI networkmonitor addon should only be present if explicitly configured") } } diff --git a/pkg/api/const.go b/pkg/api/const.go index 420468f097..cb4f3fe697 100644 --- a/pkg/api/const.go +++ b/pkg/api/const.go @@ -107,6 +107,8 @@ const ( DefaultNVIDIADevicePluginAddonEnabled = false // DefaultContainerMonitoringAddonEnabled determines the acs-engine provided default for enabling kubernetes container monitoring addon DefaultContainerMonitoringAddonEnabled = false + // DefaultAzureCNINetworkMonitoringAddonEnabled Azure CNI networkmonitor addon default + DefaultAzureCNINetworkMonitoringAddonEnabled = false // DefaultTillerAddonName is the name of the tiller addon deployment DefaultTillerAddonName = "tiller" // DefaultACIConnectorAddonName is the name of the tiller addon deployment From 326220266f0d8b558d77279fc25ef3f753991666 Mon Sep 17 00:00:00 2001 From: Rita Zhang Date: Fri, 6 Jul 2018 15:14:06 -0700 Subject: [PATCH 71/74] Update clusterKeyVaultName for kv and sa (#3440) --- .circleci/config.yml | 4 ++-- parts/k8s/kubernetesmastervars.t | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 23648aeee8..54c0636a51 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -188,7 +188,7 @@ jobs: echo 'export ORCHESTRATOR_RELEASE=1.10' >> $BASH_ENV echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/release/default/definition.json' >> $BASH_ENV echo 'export CREATE_VNET=true' >> $BASH_ENV - echo 'export ENABLE_KMS_ENCRYPTION=false' >> $BASH_ENV + echo 'export ENABLE_KMS_ENCRYPTION=true' >> $BASH_ENV echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV echo 'export CLEANUP_IF_FAIL=false' >> $BASH_ENV echo 'export RETAIN_SSH=false' >> $BASH_ENV @@ -216,7 +216,7 @@ jobs: echo 'export ORCHESTRATOR_RELEASE=1.11' >> $BASH_ENV echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/release/default/definition.json' >> $BASH_ENV echo 'export CREATE_VNET=true' >> $BASH_ENV - echo 'export ENABLE_KMS_ENCRYPTION=false' >> $BASH_ENV + echo 'export ENABLE_KMS_ENCRYPTION=true' >> $BASH_ENV echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV echo 'export RETAIN_SSH=false' >> $BASH_ENV echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV diff --git a/parts/k8s/kubernetesmastervars.t b/parts/k8s/kubernetesmastervars.t index 0bc6507903..b1f4c1aa71 100644 --- a/parts/k8s/kubernetesmastervars.t +++ b/parts/k8s/kubernetesmastervars.t @@ -489,6 +489,6 @@ {{if not .HasStorageAccountDisks}} "apiVersionStorage": "2015-06-15", {{end}} - "clusterKeyVaultName": "[take(concat(variables('masterFqdnPrefix'), tolower(uniqueString(variables('masterFqdnPrefix')))), 20)]", + "clusterKeyVaultName": "[take(concat('kv', tolower(uniqueString(concat(variables('masterFqdnPrefix'),variables('location'),variables('nameSuffix'))))), 22)]", "clusterKeyVaultSku" : "[parameters('clusterKeyVaultSku')]" {{end}} From 6d1127d4af5ba73a81520b16b96f72c5124da3da Mon Sep 17 00:00:00 2001 From: Jim Minter Date: Fri, 6 
Jul 2018 17:25:02 -0500 Subject: [PATCH 72/74] openshift: quote all values going into yaml files and shell scripts (#3443) --- .../release-3.9/openshiftmasterscript.sh | 6 ++-- .../release-3.9/openshiftnodescript.sh | 2 +- .../unstable/openshiftmasterscript.sh | 6 ++-- .../openshift/unstable/openshiftnodescript.sh | 2 +- pkg/acsengine/template_generator.go | 18 ++++++++++-- pkg/openshift/certgen/release39/files.go | 9 ++++++ .../certgen/release39/templates/bindata.go | 10 +++---- .../templates/master/etc/etcd/etcd.conf | 12 ++++---- .../etc/origin/master/master-config.yaml | 28 +++++++++---------- .../etc/origin/master/session-secrets.yaml | 4 +-- .../ansible/azure-local-master-inventory.yml | 4 +-- .../templates/node/etc/azure/azure.conf | 18 ++++++------ pkg/openshift/certgen/unstable/files.go | 9 ++++++ .../certgen/unstable/templates/bindata.go | 10 +++---- .../templates/master/etc/etcd/etcd.conf | 12 ++++---- .../etc/origin/master/master-config.yaml | 28 +++++++++---------- .../etc/origin/master/session-secrets.yaml | 4 +-- .../ansible/azure-local-master-inventory.yml | 4 +-- .../node/etc/origin/cloudprovider/azure.conf | 18 ++++++------ 19 files changed, 118 insertions(+), 86 deletions(-) diff --git a/parts/openshift/release-3.9/openshiftmasterscript.sh b/parts/openshift/release-3.9/openshiftmasterscript.sh index 679d561e79..3b9d2d6db4 100644 --- a/parts/openshift/release-3.9/openshiftmasterscript.sh +++ b/parts/openshift/release-3.9/openshiftmasterscript.sh @@ -54,7 +54,7 @@ fi oc adm create-bootstrap-policy-file --filename=/etc/origin/master/policy.json -( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) +( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz) cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt update-ca-trust @@ -76,7 +76,7 @@ set -x ### # retrieve the public ip via dns for the router public ip and sub it in for the routingConfig.subdomain ### -routerLBHost="{{.RouterLBHostname}}" +routerLBHost={{ .RouterLBHostname | shellQuote }} routerLBIP=$(dig +short $routerLBHost) # NOTE: The version of openshift-ansible for origin defaults the ansible var @@ -131,7 +131,7 @@ metadata: provisioner: kubernetes.io/azure-disk parameters: skuName: Premium_LRS - location: {{ .Location }} + location: {{ .Location | quote }} kind: managed EOF diff --git a/parts/openshift/release-3.9/openshiftnodescript.sh b/parts/openshift/release-3.9/openshiftnodescript.sh index b7b64e38d8..a70a8308e9 100644 --- a/parts/openshift/release-3.9/openshiftnodescript.sh +++ b/parts/openshift/release-3.9/openshiftnodescript.sh @@ -17,7 +17,7 @@ sed -i -e "s#--loglevel=2#--loglevel=4#" /etc/sysconfig/${SERVICE_TYPE}-node rm -rf /etc/etcd/* /etc/origin/master/* /etc/origin/node/* -( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) +( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz) cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt update-ca-trust diff --git a/parts/openshift/unstable/openshiftmasterscript.sh b/parts/openshift/unstable/openshiftmasterscript.sh index 7b4ac06deb..b6b012976d 100644 --- a/parts/openshift/unstable/openshiftmasterscript.sh +++ b/parts/openshift/unstable/openshiftmasterscript.sh @@ -80,7 +80,7 @@ mkdir -p /etc/origin/master oc adm create-bootstrap-policy-file --filename=/etc/origin/master/policy.json -( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) +( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz) cp /etc/origin/node/ca.crt 
/etc/pki/ca-trust/source/anchors/openshift-ca.crt
 update-ca-trust
@@ -102,7 +102,7 @@ set -x
 ###
 # retrieve the public ip via dns for the router public ip and sub it in for the routingConfig.subdomain
 ###
-routerLBHost="{{.RouterLBHostname}}"
+routerLBHost={{ .RouterLBHostname | shellQuote }}
 routerLBIP=$(dig +short $routerLBHost)
 
 # NOTE: The version of openshift-ansible for origin defaults the ansible var
@@ -172,7 +172,7 @@ metadata:
 provisioner: kubernetes.io/azure-disk
 parameters:
   skuName: Premium_LRS
-  location: {{ .Location }}
+  location: {{ .Location | quote }}
 kind: managed
 EOF
 
diff --git a/parts/openshift/unstable/openshiftnodescript.sh b/parts/openshift/unstable/openshiftnodescript.sh
index 47dcce76a4..7c8b79aff3 100644
--- a/parts/openshift/unstable/openshiftnodescript.sh
+++ b/parts/openshift/unstable/openshiftnodescript.sh
@@ -49,7 +49,7 @@ sed -i -e "s#DEBUG_LOGLEVEL=2#DEBUG_LOGLEVEL=4#" /etc/sysconfig/${SERVICE_TYPE}-
 
 rm -rf /etc/etcd/* /etc/origin/master/*
 
-( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz)
+( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz)
 
 cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt
 update-ca-trust
diff --git a/pkg/acsengine/template_generator.go b/pkg/acsengine/template_generator.go
index d46923ae50..95cb5d2403 100644
--- a/pkg/acsengine/template_generator.go
+++ b/pkg/acsengine/template_generator.go
@@ -1086,7 +1086,12 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
 		"OpenShiftGetMasterSh": func() (string, error) {
 			masterShAsset := getOpenshiftMasterShAsset(cs.Properties.OrchestratorProfile.OrchestratorVersion)
 			tb := MustAsset(masterShAsset)
-			t, err := template.New("master").Parse(string(tb))
+			t, err := template.New("master").Funcs(template.FuncMap{
+				"quote": strconv.Quote,
+				"shellQuote": func(s string) string {
+					return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'`
+				},
+			}).Parse(string(tb))
 			if err != nil {
 				return "", err
 			}
@@ -1109,7 +1114,12 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
 		"OpenShiftGetNodeSh": func(profile *api.AgentPoolProfile) (string, error) {
 			nodeShAsset := getOpenshiftNodeShAsset(cs.Properties.OrchestratorProfile.OrchestratorVersion)
 			tb := MustAsset(nodeShAsset)
-			t, err := template.New("node").Parse(string(tb))
+			t, err := template.New("node").Funcs(template.FuncMap{
+				"quote": strconv.Quote,
+				"shellQuote": func(s string) string {
+					return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'`
+				},
+			}).Parse(string(tb))
 			if err != nil {
 				return "", err
 			}
@@ -1151,5 +1161,9 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat
 		"IsCustomVNET": func() bool {
 			return isCustomVNET(cs.Properties.AgentPoolProfiles)
 		},
+		"quote": strconv.Quote,
+		"shellQuote": func(s string) string {
+			return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'`
+		},
 	}
 }
diff --git a/pkg/openshift/certgen/release39/files.go b/pkg/openshift/certgen/release39/files.go
index 7f2474cef8..c6ea18d53c 100644
--- a/pkg/openshift/certgen/release39/files.go
+++ b/pkg/openshift/certgen/release39/files.go
@@ -6,6 +6,7 @@ import (
 	"encoding/base64"
 	"os"
 	"regexp"
+	"strconv"
 	"strings"
 	"text/template"
 
@@ -112,6 +113,10 @@ func (c *Config) WriteMasterFiles(fs filesystem.Writer) error {
 			h, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
 			return string(h), err
 		},
+		"quote": strconv.Quote,
+		"shellQuote": func(s string) string {
+			return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'`
+		},
 	}).Parse(string(tb))
 	if err != nil {
 		return err
@@ -146,6 +151,10 @@ func (c *Config) WriteNodeFiles(fs filesystem.Writer) error {
 
 	t, err := template.New("template").Funcs(template.FuncMap{
 		"QuoteMeta": regexp.QuoteMeta,
+		"quote": strconv.Quote,
+		"shellQuote": func(s string) string {
+			return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'`
+		},
 	}).Parse(string(tb))
 	if err != nil {
 		return err
diff --git a/pkg/openshift/certgen/release39/templates/bindata.go b/pkg/openshift/certgen/release39/templates/bindata.go
index 0822a90a70..4dda8a18e8 100644
--- a/pkg/openshift/certgen/release39/templates/bindata.go
+++ b/pkg/openshift/certgen/release39/templates/bindata.go
@@ -79,7 +79,7 @@ func (fi bindataFileInfo) Sys() interface{} {
 	return nil
 }
 
-var _masterEtcEtcdEtcdConf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x54\x5d\x6f\xea\x38\x10\x7d\xcf\xaf\x88\xcc\xcb\xee\xc3\x85\x94\xdb\x8f\xa5\x92\x1f\x8c\x33\x80\x85\x49\x52\xdb\x81\xa2\xaa\xb2\x28\x0d\x34\xda\x14\x50\x12\xba\xad\xba\xfd\xef\x2b\x27\x81\xd0\xc2\xae\x56\xba\xea\x1b\xcc\x1c\xcf\x9c\x39\x73\x26\xa0\xa8\xab\x3d\x32\x02\xfc\xfe\x6e\x37\x47\xb3\x2c\x8f\xd2\xe6\x60\x9d\xe5\xab\xd9\x73\x64\xff\x6d\x67\x4f\x51\x92\xdc\x6c\xd7\x79\x64\x7f\x7c\x58\x05\x9a\x33\xa9\xc0\xd3\x01\x80\xd0\xa1\xe0\xd2\xbc\xdc\xa4\xf1\x2a\xb7\xd1\x53\x9e\x6f\xb2\xeb\x56\x0b\xd9\xbf\xc5\xab\xc7\xe8\x75\x5f\x91\x05\x99\xed\xfc\xde\x94\x79\x1a\xaf\x96\x36\xba\x6e\xff\xfc\xc3\x41\xa7\xcb\xbb\x44\x11\xed\x32\x81\x5b\x2f\xb3\xb4\x95\xc4\x0f\xad\x28\x9f\x3f\xb6\xac\x46\x91\x9d\x10\x5e\x24\x11\xaa\x02\xd2\x23\x81\x1c\xf8\x4a\x53\x3f\xf4\x14\x3e\x73\x1c\xc7\x29\x0b\x0d\x80\x08\xd5\x05\xa2\x34\xf3\x14\x88\x31\xe1\xf8\x62\x97\x03\x0e\x54\x31\xdf\xd3\x8a\x8d\xc0\x0f\x15\x6e\xef\x53\xd5\x78\x94\x33\xf0\xd4\x2f\x0c\x78\xd5\x39\x1e\xb0\xa4\x3c\x22\xb7\x7b\xda\x12\x5f\x1c\x44\x27\x84\xd7\x01\xea\x0b\x89\x2d\xcb\x6a\xdc\xcd\x93\xad\x69\x72\x5f\x12\x64\x1e\x53\x8c\x70\x4d\xdc\x31\x08\xc5\x24\x7c\xdb\x2a\x76\x9d\x28\x0f\xa5\x02\x51\x57\x3f\x32\x0a\xc2\xdf\xd0\x4f\x4b\x45\x14\xe0\x55\xf4\xd7\xe9\xb4\xf2\x87\xe0\x61\xe3\x8e\x1f\x95\x42\x3f\xce\x2a\xed\x5c\x26\xa9\x3f\x06\x31\xc5\x5f\x03\x5a\x8a\xf1\x71\xb0\x47\x38\xef\x12\x3a\xc4\x9b\x74\xfd\xfa\x76\x94\x0e\x84\x7f\x3b\xc5\x25\x8b\x5a\xf6\x6f\xb4\x88\x54\x82\x51\xa5\x05\x50\xdf\xeb\xb1\xbe\xa6\x03\xa0\x43\x8c\x16\xb3\x24\x8b\x76\xce\x27\xa1\xf2\x35\xf5\x47\x01\x29\xad\x2c\x40\x81\x67\x7e\x61\xe4\xec\x30\xe0\x91\x2e\x07\x3d\x6e\x63\x94\xa7\xdb\x08\x95\x23\xdc\x84\xbe\x22\xda\xcc\x0b\x9e\xab\xbb\x53\x05\x12\x9f\xb7\x3b\xe7\x9d\xcb\xab\x76\xe7\xd2\x38\xae\x90\xe1\xbe\x2a\x52\x4e\xbf\x5e\x2c\x0e\xff\xeb\x1e\x61\x3c\x14\xa0\x27\x84\x29\x8c\x2e\x1c\x67\xdf\xb4\xcc\x0b\xe8\x09\x90\x83\xfa\xf8\xd0\x4f\xe7\x08\xe4\x9a\x7d\xee\x6e\x10\x9d\x1d\xe5\x27\x82\x29\xa8\x01\xa7\xba\x10\xb7\xce\x3b\xc8\x90\xcf\xa2\xf9\x36\x8d\xf3\xb7\xea\x5e\x94\x30\x76\x71\x35\x25\xba\xc7\x38\x60\xf3\x3d\x29\xbf\x29\xf3\x59\x73\x9e\xe6\x25\xaa\x5a\x26\x05\xa1\x8c\xb2\x83\x4f\x82\x15\xd1\x2f\x8f\xb3\x28\x7d\x89\xd2\xba\xc0\x10\xa6\xff\x02\xf9\x33\x7a\x3b\x5c\x99\xe2\x72\xbf\xc9\x72\x12\x73\xbe\xff\x8f\x65\x01\xfd\x4f\xaa\x25\xe2\x04\xdf\x4d\x74\xc8\xb6\x80\x9d\xa0\x5c\xa0\x6a\xc2\x05\xec\x88\xb5\xd5\xb8\x4b\xd6\xcb\x65\xbc\x5a\x56\x12\xbb\xd0\x0d\xfb\x18\xf5\x0e\x86\xe2\x7e\x5f\x07\x84\x0e\x49\x1f\x34\x87\x31\x98\xe7\xa6\x43\xa9\x09\x2e\x5e\xa0\xca\x6a\x8b\x38\x29\x6a\x7d\xf2\x6c\x10\x08\xbf\xf7\xc5\xf2\x23\x30\x77\x21\x31\x7a\x98\x65\xf1\x1c\x59\x0d\xab\x71\x37\xdb\xe6\x4f\xf7\xb5\xbe\x83\xea\xb3\x80\xb2\xf8\x79\x93\x44\xc8\xfa\x27\x00\x00\xff\xff\x6b\x4e\x0b\x40\xe2\x06\x00\x00") func masterEtcEtcdEtcdConfBytes() ([]byte, error) { return bindataRead( @@ -119,7 +119,7 @@ func masterEtcOriginMasterHtpasswd() (*asset, error) { return a, nil } -var _masterEtcOriginMasterMasterConfigYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x58\x79\x6f\x1b\xbb\x11\xff\x5f\x9f\x82\x08\x1e\x90\xa4\xe8\xae\x24\x3b\xe7\x02\x45\xa1\xda\xce\x8b\xf0\xec\x58\x95\x9c\xa2\x40\x5d\x04\x14\x39\x5a\x31\xe2\x92\x1b\x1e\x8a\x15\x37\xdf\xbd\xe0\xb1\xa7\xa4\x38\xcd\x4b\xf1\x9c\x20\xf1\x92\x33\xc3\xe1\xcc\x8f\x73\x61\x5a\x30\xad\x99\x14\x67\x52\xac\x58\x9e\x0d\x10\x2a\xb9\xcd\x59\xeb\x1b\xa1\xbf\x59\xc6\xe9\x39\xac\xb0\xe5\x46\x87\x25\x84\x88\x27\xb0\x0a\x1b\x26\x45\xb5\x88\x10\x2e\xd9\x3f\x40\x39\x89\x19\xda\x8e\xeb\x65\x10\xdb\x0c\xfd\xeb\xdf\xf5\xf7\x86\x09\x9a\x75\x05\x87\x13\x6b\x0a\x05\x5a\x5a\x45\x40\x37\xb2\x11\xe2\xac\x60\x46\x67\xe8\xfe\x6b\x6b\x51\xc1\x27\x0b\xba\xb5\xec\xc5\x5e\x6f\x41\x29\x46\xe1\x07\x15\x6e\x29\x58\x4b\x6a\x69\x38\x93\x74\xa6\x40\x83\xf9\x31\xe9\x94\x69\xbc\xe4\x90\xa1\x15\xe6\x1a\x7a\x87\x46\x83\x4c\xba\xae\xf1\x44\xb2\x04\xa1\xd7\x6c\x65\x52\x26\x87\xd3\x02\xe7\x30\x93\x9c\x91\xdd\x0f\x3a\xe5\x0e\x88\x75\x94\x73\xcb\xdb\x76\x4e\x50\x81\x0d\x59\x7b\xf9\x13\x21\xa4\xf1\xe2\x3a\x8e\x48\xd0\x06\x76\x19\x62\x8e\x44\xa7\x1d\xb5\x28\x88\x5d\x52\x8b\x6e\xf1\x20\xb4\xc5\xdc\x42\x86\x1e\x1b\x65\xe1\x71\x6b\x47\xe0\x02\xb2\x46\x9d\x84\x82\x60\x40\x5b\x04\x52\xcc\x0f\xc1\x21\xa9\x51\x92\xa1\x52\x52\x7d\x64\x6b\xe9\xbc\xa8\x3b\x88\xf9\x08\xc4\x64\xc8\xe9\xd1\x5a\xd6\x1b\x56\x5e\xfb\x93\xb8\xd7\xe3\x0d\x66\xdc\x2a\xe8\xd1\x05\x27\xb5\x8c\x1f\xfd\x83\xf3\x5c\x41\x8e\x8d\x54\xad\xb7\xa4\xe4\xdd\xee\x8c\x33\x10\x66\x2a\x56\x32\xe8\x4e\x40\x99\x37\xcc\x79\xbf\x61\x49\x56\x4a\x0a\x93\x78\xfa\x94\x28\xe3\x09\x37\xb0\xfb\x26\xdd\x06\x76\x03\x5c\xb2\x4b\xd8\x02\xd7\xd9\x20\x71\xbe\xed\xb9\x1a\x5b\xb3\x6e\xd4\x89\x2f\xe5\x2d\x60\x0a\x2a\x2a\xe3\x95\x3b\x9b\x64\xa8\x25\x39\x21\xb8\x56\x22\x12\xc8\xa2\x90\xe2\x1d\x2e\x2a\x07\x24\x47\x94\x1a\x04\x60\x19\x85\xc3\x29\x33\x05\x2b\x76\xd7\x70\xfd\x33\x99\x43\x21\x0d\x24\x17\x8e\x26\xf1\xab\xb9\x92\xb6\x0c\xe4\xfb\x74\xbf\xba\x4d\xbf\x68\x35\x28\x87\x94\x63\x94\xef\x35\xa8\x01\x91\xc2\x28\xc9\x39\xb4\xbc\x00\x1c\x48\xf3\x20\xb8\x24\x9b\x77\x1e\x70\x35\x6c\x93\x02\x6b\x03\x2a\x69\x98\x1d\x5a\x34\xa8\x2d\x23\xb0\x70\xff\x89\xfc\x0c\x54\x7c\xec\x9a\xe5\xa2\x32\x5f\xdb\x9b\x91\x3e\x09\xfb\xb5\x01\x5b\x7e\xec\x51\x38\xff\xb5\x8e\xcc\xd0\xe3\x3f\x3d\x1e\x10\xa9\xf4\x84\x73\xf9\x19\xe8\xb5\x62\x39\x13\xde\xb3\x4f\xfe\xca\x9e\x0e\x87\xe3\x93\x97\xb7\xe9\xc8\xff\x1d\x3f\xc9\xfe\x73\xfb\xe5\x69\xbd\xc5\x25\xc1\x7c\x2d\xb5\xe9\xad\xdf\xdf\xa3\xbf\x5b\x69\xe0\x0a\x0c\x46\x4f\x98\xa0\x70\x87\xd2\x2b\x7f\xdd\x74\x3a\xd3\x68\xf4\x34\x5d\x18\xc5\x44\x8e\xbe\x7e\xed\xb1\x6e\xec\x12\x94\x00\x03\xfa\x36\xa5\x21\x26\x3d\x4c\x71\x9b\xea\x2d\xb9\x4d\x09\xb7\xee\x88\xdb\xd4\xeb\x75\x94\xed\x5b\xca\xa6\x17\x77\xc6\x39\x9c\x07\x6d\xdf\x4a\x6d\x9c\xf7\xf7\xf5\xac\xdd\x78\x4c\xcd\xae\xd8\x78\xf9\xff\x45\x9e\xbf\xd4\x77\xde\xbd\x47\x36\x7e\x79\x72\x9b\x9e\x1e\xf6\xd9\x91\x83\x1e\xb0\x5e\xcd\x15\xd7\xa9\xd0\x0d\xd4\x97\x4c\xd0\x09\xa5\x0a\xb4\xce\xd0\x28\xf5\x7f\xb2\x57\xa3\xe7\xa7\x71\xef\x1d\x98\xcf\x52\x6d\x32\x64\x48\xf9\x6c\x00\x86\xd0\x6e\x74\x22\x38\x43\xe1\x31\xa4\x6e\xb3\x09\x04\x0d\xcc\x3b\xdb\x9e\x39\x92\xd4\x30\x3f\x40\xe1\xa0\x8e\x90\x55\xdc\x3f\xdb\x04\xad\x8d\x29\x75\xe6\x5d\x73\xc0\x21\xd9\xc9\xe9\xcb\xd7\x5e\xbb\x85\x91\x0a\xe7\xd0\x5c\xb0\x31\x7b\xdc\x0a\x01\x26\x6b\x6d\xa4\x4c\x1e\x22\xec\x66\x40\x67\xc5\x85\xb3\x62\x4f\x4c\x3b\x95\x1d\x20\x6b\x0b\xf1\xc9\xaf\xd1\x6c\x25\x55\x81\x4d\x86\xae\x26\x8b\x9b\x8b\xf9\x87\xeb\xf9\xc5\xaf\x1f\xde\xcf\x2f\x93\x5f\xee\x89\x2c\x4a\x29\x40\x98\xaf\xd9\x2f\xf7\xdb\x20\xc1\x55\x2a\x1c\x1b\xd0\xa6\x2a\x02\x58\x3f\xa
3\x38\xa1\x4c\x84\x37\x30\x87\x9c\x69\xa3\x76\x95\x91\x32\x44\x25\xd9\x80\x4a\x54\xdc\xa8\x10\xe4\x00\x94\x3d\x1f\x8d\x46\x83\x90\xa7\x82\x71\x63\x8a\x72\x36\xe1\x60\xf6\x5d\x4e\x70\xb2\xb4\x82\x72\x38\xe6\xed\xc8\xf9\x6d\x87\xf7\x88\x82\xcf\x4b\xa9\x4c\x86\xc6\xa3\x93\xe7\xa3\x41\xe3\x93\xb6\x5a\x4e\x09\x5c\x32\x17\x67\x41\x4d\x54\x6e\x0b\x10\x55\x9d\xa9\xac\x30\xac\x80\x84\xb4\xca\xd1\xc4\x51\xeb\xa1\x06\x63\x98\xc8\x75\xba\x79\xe5\x5c\x3e\xdc\x8e\x31\x2f\xd7\x78\xfc\x97\x3a\x5b\xeb\xe0\xb3\x64\x89\xc9\x06\x04\xad\xb8\x1d\xae\x4e\x3b\x04\x05\x50\x86\x13\xb3\x2b\xa1\x39\xa1\xe4\x8c\xf8\xba\x67\xb8\x15\x34\x6d\xa1\xab\x54\xd2\xc8\xa5\x5d\xc5\xec\x28\x2d\x75\x99\x6f\xcb\xea\x94\x9a\xa0\x47\xf8\x8b\x55\xf0\xa8\x45\xd1\xd5\xff\xd1\x10\x0c\x19\x7a\xa2\xf0\x6f\xea\xf6\x1d\x7d\x93\x0e\x7a\x86\x88\x21\xc1\xa7\x0e\x26\xf2\xc4\x79\x28\x59\x39\xeb\x77\x64\x4a\x9f\x32\x86\xc1\x21\xc3\xf0\x7a\x1f\x1d\x14\xb0\x81\xdd\xf7\xf0\x6f\x60\xf7\xe8\xff\x72\xd3\x22\x22\xc0\x0a\x07\x8f\x7a\x61\x3a\xcb\xd0\xfd\xfd\x43\x99\xca\xe3\x8a\x5e\x6c\x99\xcf\xeb\x37\xac\x00\x69\x4d\x86\x84\xe5\xfc\xe1\xaa\x2b\xa2\x35\x56\x3a\x6d\x40\xef\x43\xba\x43\x14\x00\xad\xc9\x1a\xa8\xed\x78\xa8\x3a\xb8\xde\x0a\xc0\x0e\x92\x0e\xd8\xb5\xa6\x4b\x3f\x6a\x5f\x22\xc7\xc2\x40\xbf\x93\x14\x66\x52\x99\x39\x16\xb9\x2b\x94\x1f\xb7\xf6\x16\x76\x29\xc0\xd9\xea\xe5\x49\x7a\xea\x03\xfb\x70\xfc\xc2\xed\xbb\xf2\x9c\x38\xce\x50\x9e\xb9\x36\x2b\x1a\xd7\xab\xed\x01\x04\x31\x97\xfe\x56\xe3\xf8\x2c\xd6\x75\x42\x84\xe2\xa8\xd7\x2f\x61\x42\xa0\x74\xdb\x06\x84\xb9\xd9\x95\x4e\xf0\x77\x3c\x8a\x3f\xb7\x69\xe2\xe5\x10\x5a\x5a\xe5\xe2\xdc\xb3\xd1\x68\x10\xbb\x94\x4a\xea\x77\x09\xf5\x4c\x9f\x4a\x9d\xa1\x13\x2f\x61\xff\x32\xee\xb7\x18\x4c\x82\xd1\xea\x30\x7e\x29\x65\xe9\xde\xff\x1f\x70\xdd\x17\xbf\xfb\xba\xa7\x5e\xc2\xde\x5d\xda\xb7\xed\xd7\xb0\x5e\x60\x78\x83\x11\x05\x33\xbb\xe4\x8c\xbc\x9f\x5f\x66\x9d\xac\x7b\xb4\xba\xca\x5a\x39\xd9\x61\xd1\x3d\x37\x11\xca\x86\x26\x5e\xc7\x68\x12\xcb\x89\xb3\xe9\xf9\xdc\xc5\xf8\x74\x7c\xf2\x2a\x00\xf3\xd9\x1e\x4d\x4c\xfc\x84\x51\xb5\x4f\x8a\x90\x2b\x5d\x03\xc2\x2f\x41\xe4\x66\x9d\xa1\xd7\x2d\x4f\x4f\x67\xad\x93\xa2\xa4\x58\xdd\x0c\x9d\x89\x0e\x73\x47\xad\x67\x7e\xa0\x11\x4a\x7e\x05\x74\x8d\x4d\x53\x43\x25\x72\xab\x13\xed\x39\x9b\xa7\xd6\xbd\x55\xe7\xb9\xc9\x6e\x43\x85\xb5\x06\xf3\x13\x0c\x3c\x24\x52\x68\xc9\x61\x38\x70\xdd\x10\xf6\x40\xad\xa3\x68\x01\x66\x2d\x69\x86\xb0\x35\xae\x24\x61\x14\x84\x61\x66\x37\x8b\xa1\x58\x67\x83\xfb\xfb\x04\xb1\x15\x4a\x2f\x04\x5e\x72\x98\x4c\xce\x27\xd6\xac\x1d\x55\x00\x9a\x8f\x97\x49\xec\xb2\x27\x2e\x0a\xa3\xc9\x79\x80\xe6\x1a\x73\x0e\x3e\xd6\x34\x93\x08\x2e\x73\x26\x5a\x4d\x6f\x81\xcb\x92\x89\xfc\x2a\xaa\x41\x38\x66\x85\xdf\xe8\x26\x83\x23\x63\x86\x50\x86\x5c\x97\x20\xa6\xe7\xd3\x9e\xea\x55\x0b\x15\x42\xf5\xb9\x8f\xfc\xa9\x57\x30\xdc\x3f\x9d\x4c\xce\x63\x1c\x3f\x0f\x51\xbf\x21\x5f\x00\x51\x2e\x1c\x1e\x65\x09\x04\x6d\x36\xcc\x8a\xd6\xf8\x80\xd1\xf6\xc4\x43\xdb\x65\xfd\x55\x2a\x58\x81\x52\x40\xdf\xc7\x8e\xb3\x4d\x68\x05\xfb\x64\xe1\x83\x5b\xae\x57\xfb\x34\x9d\x4d\x28\x30\xe3\xed\x5d\xbf\x10\xbf\xab\x8a\x38\x1a\xd0\x9a\xb5\x54\xec\x0b\x34\x48\xf2\xce\x48\x0b\x46\x94\xd4\x72\x65\xa4\xe0\x4c\xb8\x24\x5a\x0c\xfb\x17\xbf\x01\x81\xa3\xa1\x86\x1e\xa6\x27\xc3\x5a\x5e\x7d\x82\x91\x1b\x10\x3f\x49\xba\x97\xe5\xb1\x07\x82\x76\x30\x76\xe9\xda\x16\x54\x62\xad\x3f\x4b\x45\xfb\x48\xab\x81\xf5\x73\x81\xb6\x3a\x96\x6d\xd7\xc6\x6b\x42\x3b\x80\x7c\x7b\x33\xf3\x8b\xb3\xa8\xe4\x01\x68\xc6\x24\x3a\xd9\x2f\x8f\x7f\x5e\x60\xad\x64\xfd\x5e\x29\x1a\x7a\x53\xdc\x7a\xe9\x0a\xdf\x4d\x72\x58\xb8\x9c\x40\x5d\x4a\xa9\xb2\x52\xdc\x
0e\x61\x51\x6b\xd1\x5e\x0c\x4f\x47\x1f\xaf\x5f\x02\x59\xa2\x03\x5d\xba\xc3\x85\x03\xb4\x07\x44\x5b\x05\x97\x55\xb5\xbe\x71\xcb\x3d\x35\x5e\xbd\xa8\x8a\x81\x1a\xa3\x87\xc8\x9e\x8f\x46\x83\x12\x5b\xed\x50\xd8\x8c\x49\x42\xa8\x2a\x7b\xad\xd2\x52\x4a\xa3\x8d\xc2\x65\xe8\xa1\x8e\x2a\x1f\xf8\xaa\xca\xab\xce\x04\x53\xb1\x52\x58\x1b\x65\x89\xb1\x2a\x94\x52\x25\x26\x9d\x29\x11\x73\x24\x6d\x9e\xc5\x1a\x2b\xa0\xf5\x68\xf2\x10\xd3\xa0\x54\xf2\x23\x90\x56\x40\x8f\x8d\x9a\x2b\xd8\x16\x7e\x32\x25\x55\x86\x84\xa4\x90\x28\xc9\x21\xed\xf4\xb1\x43\xd7\x3a\x5a\x03\x55\x4f\x13\x85\xcd\xc3\x1c\xef\x0a\xb4\xc6\x75\x9d\xd8\xdd\xbb\x81\xa2\x74\xfd\x65\x5d\x44\x12\xab\x98\xd9\x4d\x38\x97\x04\xbb\x23\xc3\x8b\x23\xba\x5e\x89\x35\xa7\x1e\x65\xc3\x93\x6a\xf3\x12\x2f\x81\xeb\x19\xa8\x59\x10\x9e\xa1\xe7\x61\x14\xc7\x68\x9f\x6f\x3c\xaa\x7e\x92\xf1\xeb\xea\x67\xe8\x57\x07\x4a\x5a\xd7\xaa\x35\x36\xd0\x76\x49\x65\x81\xdd\xeb\xbf\xb9\xb8\x9a\xcd\xaf\xdf\xdf\x5c\xcc\xa7\xb3\x54\xb0\xd2\xf5\xdd\x31\x0f\x4f\x08\x71\xed\x41\xc3\xe6\xc7\xff\x01\x9c\x73\x17\xa2\x41\x10\xd0\x4d\xea\x2a\xb0\xc0\x39\xd0\x7a\x4a\x99\x54\xb6\xf6\xbf\xfb\x29\xb0\x7f\xd8\x6e\xbd\xe4\x72\xf7\xc0\x2b\x2f\x15\xdb\x62\x03\xbf\xf5\xa6\x77\x38\x68\xe5\xea\x35\xbf\x5f\xf5\xb9\x3e\x1a\x44\xe2\x78\xfc\x1e\x87\xa7\xf1\x0c\x3a\x4c\x15\xab\xfe\xe4\xe0\xdc\xe6\xe0\x4b\xdf\x9f\xe2\x1c\xe8\x6d\xb4\xef\xa5\xab\x66\xbe\x1e\xf1\xd6\xd3\x9c\x7e\x9b\x13\xe9\xc3\x4d\x0a\x7c\x17\x41\xa4\xa7\xe2\x0d\x67\xf9\xda\x84\x97\x58\x0f\x90\x63\xc3\xd5\x0d\x2a\x5b\xc9\x6d\xd1\x9a\x87\xd0\x9d\xc0\x05\x23\x3e\xa0\xba\x68\xc1\x44\x1e\xea\x13\x1a\x43\xfe\x7f\x03\x00\x00\xff\xff\x2a\xb2\x27\xe9\x7c\x1a\x00\x00") +var _masterEtcOriginMasterMasterConfigYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x7b\x6f\x1b\xb9\x11\xff\x5f\x9f\x82\x10\x0e\x48\x52\x74\x57\xb2\x9d\xe7\x02\x45\xa1\xda\xce\xc5\x38\x3b\x56\x65\xa7\x28\x50\x17\x01\x45\x8e\x56\x8c\xb8\xe4\x86\x0f\xc5\x8a\x2f\xdf\xbd\xe0\x63\x77\xb9\x7a\x24\x41\x9a\xc3\x39\x41\x22\x91\x33\xc3\xe1\xcc\x8f\xf3\x32\xa6\x15\xd3\x9a\x49\x71\x2a\xc5\x82\x95\xc5\x00\xa1\x9a\xdb\x92\x25\xdf\x11\xfa\x87\x65\x9c\x9e\xc1\x02\x5b\x6e\x74\x58\x42\x88\x78\x02\xab\xb0\x61\x52\x34\x8b\x08\xe1\x9a\xfd\x0b\x94\x93\x58\xa0\xf5\x51\xbb\x0c\x62\x5d\xa0\xff\xfc\xb7\xfd\xbe\x62\x82\x16\x7d\xc1\xe1\xc4\x96\x42\x81\x96\x56\x11\xd0\x9d\x6c\x84\x38\xab\x98\xd1\x05\x7a\xf8\x92\x2c\x2a\xf8\x68\x41\x27\xcb\x5e\xec\xf5\x1a\x94\x62\x14\x7e\x50\xe1\x44\xc1\x56\x52\xa2\xe1\x54\xd2\xa9\x02\x0d\xe6\xc7\xa4\x53\xa6\xf1\x9c\x43\x81\x16\x98\x6b\xd8\x3a\x34\x1a\x64\xd2\x77\x8d\x27\x92\x35\x08\xbd\x64\x0b\x93\x33\x39\xba\xa8\x70\x09\x53\xc9\x19\xd9\xfc\xa0\x53\xee\x81\x58\x47\x39\xb3\x3c\xb5\x73\x86\x2a\x6c\xc8\xd2\xcb\x9f\x08\x21\x8d\x17\xd7\x73\x44\x86\x56\xb0\x29\x10\x73\x24\x3a\xef\xa9\x45\x41\x6c\xb2\x56\x74\xc2\x83\xd0\x1a\x73\x0b\x05\x7a\x64\x94\x85\x47\xc9\x8e\xc0\x15\x14\x9d\x3a\x19\x05\xc1\x80\x26\x04\x52\xcc\xf6\xc1\x21\x6b\x51\x52\xa0\x5a\x52\x7d\x60\x6b\xee\xbc\xa8\x7b\x88\xf9\x00\xc4\x14\xc8\xe9\x91\x2c\xeb\x15\xab\xaf\xfd\x49\xdc\xeb\xf1\x1a\x33\x6e\x15\x6c\xd1\x05\x27\x25\xc6\x8f\xfe\xc1\x65\xa9\xa0\xc4\x46\xaa\xe4\x2d\x29\x79\xbf\x39\xe5\x0c\x84\xb9\x10\x0b\x19\x74\x27\xa0\xcc\x6b\xe6\xbc\xdf\xb1\x64\x0b\x25\x85\xc9\x3c\x7d\x4e\x94\xf1\x84\x2b\xd8\x7c\x95\x6e\x05\x9b\x01\xae\xd9\x25\xac\x81\xeb\x62\x90\x39\xdf\x6e\xb9\x1a\x5b\xb3\xec\xd4\x89\x2f\xe5\x0d\x60\x0a\x2a\x2a\xe3\x95\x3b\x9d\x14\x28\x91\x9c\x11\xdc\x2a\x11\x09\x64\x55\x49\xf1\x16\x57\x8d\x03\xb2\x03\x4a\x0d\x02\xb0\x8c\xc2\xe1\x94\xa9\x82\x05\xbb\xef\xb8\xfe\x9d\xcd\xa0\x92\x06\xb2\x73\x47\x93\xf9
\xd5\x52\x49\x5b\x07\xf2\x5d\xba\x5f\xdd\xa6\x5f\xb4\x1a\x94\x43\xca\x21\xca\x77\x1a\xd4\x80\x48\x61\x94\xe4\x1c\x12\x2f\x00\x07\xd2\x3d\x08\x2e\xc9\xea\xad\x07\x5c\x0b\xdb\xac\xc2\xda\x80\xca\x3a\x66\x87\x16\x0d\x6a\xcd\x08\xdc\xb8\xff\x44\x79\x0a\x2a\x3e\x76\xcd\x4a\xd1\x98\x2f\xf5\x66\xa4\xcf\xc2\x7e\x6b\xc0\xc4\x8f\x5b\x14\xce\x7f\xc9\x91\x05\x7a\xf4\x97\x47\x03\x22\x95\x9e\x70\x2e\x3f\x01\xbd\x56\xac\x64\xc2\x7b\xf6\xf1\xdf\xd9\x93\xd1\xe8\xe8\xf8\xc5\x5d\x3e\xf6\x7f\x8f\x1e\x17\xbf\xdf\x7d\x7e\xd2\x6e\x71\x49\x30\x5f\x4a\x6d\xda\xf5\x87\x07\x54\x2b\x26\x0c\x1a\x06\x8a\x21\x7a\xfc\x4f\x2b\x0d\x5c\x81\xc1\xe8\x31\x13\x14\xee\x51\x7e\xe5\x2f\x9e\x5f\x4c\x35\x1a\x3f\xc9\x6f\x8c\x62\xa2\x7c\x82\x86\x4e\xc8\xdd\xe7\x27\x43\xf4\x3b\xfa\xe8\x78\xd0\x97\x2f\xed\x49\x2b\x3b\x07\x25\xc0\x80\xbe\xcb\x69\x08\x55\x5b\xba\xec\xa1\xb8\xcb\xf5\x9a\xdc\xe5\x84\x5b\x77\xde\x5d\xee\xd5\x3d\xc8\xf6\x7d\x77\xc8\xcf\xef\x8d\x43\x04\x0f\x97\x78\x23\xb5\x71\xf0\xf8\x86\xfa\xad\xd3\x77\xb5\xff\xfa\x69\xd1\x54\x3f\x7c\x8c\x37\xc1\x77\x5a\x6a\x8b\xec\xe8\xc5\xf1\x5d\x7e\xb2\xdf\xf1\x07\x0e\xfa\x86\xad\x5b\xae\xb8\x4e\x85\xee\xde\xcb\x9c\x09\x3a\xa1\x54\x81\xd6\x05\x1a\xe7\xfe\x4f\xf1\x72\xfc\xec\x24\xee\xbd\x05\xf3\x49\xaa\x55\x81\x0c\xa9\x9f\x0e\xc0\x10\xda\x0f\x71\x04\x17\x28\xbc\xa8\xdc\x6d\x76\xd1\xa4\x7b\x2b\xbd\x6d\xcf\x1c\x49\xda\xb7\xb2\x87\xc2\xbd\x17\x84\xac\xe2\xfe\xed\xa7\xfe\x5a\x1a\x53\xeb\xc2\x79\x6c\xdb\x4d\x68\x58\x1c\x9f\xbc\x78\xd5\xf3\x91\x93\x79\x63\xa4\xc2\x25\x74\x97\xee\x5c\x11\xb7\x42\xe4\x2a\x92\x8d\x9c\xc9\x7d\x84\xfd\xd4\xea\x2c\x7b\xe3\x2c\xbb\x25\x26\xcd\x91\x7b\xc8\x52\x21\x3e\xab\x76\x9a\x2d\xa4\xaa\xb0\x29\xd0\xd5\xe4\xe6\xf6\x7c\xf6\xfe\x7a\x76\xfe\xeb\xfb\x77\xb3\xcb\xec\x97\x07\x22\xab\x5a\x0a\x10\xe6\x4b\xf1\xcb\xc3\x3a\x48\x70\x25\x10\xc7\x06\xb4\x69\xaa\x0b\xb6\x9d\xaa\x9c\x50\x26\xc2\xdb\x99\x41\xc9\xb4\x51\x9b\xc6\x5e\x05\xa2\x92\xac\x40\x65\x2a\x6e\x34\xa8\x72\xa0\x2a\x9e\x8d\xc7\xe3\x41\x48\x80\xc1\xce\x31\xf7\x39\x9b\x70\x30\xbb\x30\x20\x38\x9b\x5b\x41\x39\x1c\x42\x40\xe4\xfc\x3a\x08\xb6\x88\x02\x0e\x6a\xa9\x4c\x81\x8e\xc6\xc7\xcf\xc6\x83\xce\x27\xa9\x5a\x4e\x09\x5c\x33\x17\xc0\x41\x4d\x54\x69\x2b\x10\x4d\x01\xab\xac\x30\xac\x82\x8c\x24\x75\x6e\xe6\xa8\xf5\x48\x83\x31\x4c\x94\x3a\x5f\xbd\x74\x2e\x1f\xad\x8f\x30\xaf\x97\xf8\xe8\x6f\x6d\x19\xa0\x83\xcf\xb2\x39\x26\x2b\x10\xb4\xe1\x76\xb8\x3a\xe9\x11\x54\x40\x19\xce\xcc\xa6\x86\xee\x84\x9a\x33\xe2\x0b\xaa\xd1\x5a\xd0\x3c\x41\x57\xad\xa4\x91\x73\xbb\x88\x69\x57\x5a\xea\x52\xea\x9a\xb5\xb9\x3a\x43\x43\xfc\xd9\x2a\x18\x26\x14\x7d\xfd\x87\x23\x30\x64\xe4\x89\xc2\xbf\xb9\xdb\x77\xf4\x5d\x9e\xd9\x32\x44\x0c\x13\x3e\x27\x31\x51\x66\xce\x43\xd9\xc2\x59\xbf\x27\x53\xfa\x5c\x34\x0a\x0e\x19\x85\x17\x3d\xdc\x2b\x60\x05\x9b\xef\xe1\x5f\xc1\x66\xf8\x87\xdc\xb4\x8a\x08\xb0\xc2\xc1\xa3\x5d\xb8\x98\x16\x2e\x60\x7c\x2d\xf1\xa5\x31\xc2\xe1\x8b\x9e\xaf\x99\x2f\x1c\x6e\x59\x05\xd2\x9a\x02\x09\xcb\xf9\xb7\xcb\xba\x88\xda\x58\x4a\xa5\xc0\xde\x85\x76\x8f\x28\x00\x5b\x93\x25\x50\xdb\xf3\x54\x73\x70\xbb\x15\x00\x1e\x24\xed\xb1\x6f\x4b\x97\x7f\xd0\xbe\x06\x8f\x95\x87\x7e\x2b\x29\x4c\xa5\x32\x33\x2c\x4a\x57\x89\x3f\x4a\xf6\x6e\xec\x5c\x80\xb3\xd9\x8b\xe3\xfc\xc4\x07\xfd\xd1\xd1\x73\xb7\xef\xea\x7f\xe2\x38\x43\xfd\xe7\xfa\xb8\x68\x64\xaf\xb6\x07\x12\xc4\x5c\xfc\x5b\x8b\xe7\xd3\x58\x38\x0a\x11\xaa\xaf\xad\x86\x0c\x13\x02\xb5\xdb\x36\x20\xcc\xed\xa6\x76\x82\xbf\xe3\x71\xfc\x35\xa5\x89\x97\x43\x68\x6e\x95\x8b\x77\x4f\xc7\xe3\x41\x6c\x83\x1a\xa9\xdf\x25\xd4\x33\x7d\xac\x75\x81\x8e\xbd\x84\xdd\xcb\xb8\x4f\x31\xa8\x04\xa3\xb5\xe1\xfc\x52\xca\xda\xc5\x81\x3
f\xe1\xba\xcf\xff\xef\xeb\x9e\x78\x09\x3b\x77\x49\x6f\xbb\x5d\x24\x7b\x81\xe1\x2d\x46\x14\x4c\xed\x9c\x33\xf2\x6e\x76\x59\xec\xcf\xc8\xfb\xcb\x34\x34\x2c\xba\x6c\xed\x20\x99\xbe\x3e\x11\x2a\x8c\x2e\x8c\xc7\x20\x13\x2b\x8f\xd3\x8b\xb3\x99\x0b\xfd\xf9\xd1\xf1\xcb\x80\xd3\xa7\x3b\x34\xb1\x46\x20\x8c\xaa\x5d\x52\x84\x5c\xa9\x1c\x00\x7f\x09\xa2\x34\xcb\x02\xbd\x4a\x1c\x7f\x31\x4d\x4e\x8a\x92\x62\x21\x34\x72\x16\xdb\xcf\x1d\xb5\x9e\xfa\x01\x4a\x68\x31\x14\xd0\x25\x36\x5d\xb9\x95\xc9\xb5\xce\xb4\xe7\xec\x5e\x5e\xff\x56\xbd\xd7\x27\xfb\x0d\x1c\xd6\x1a\xcc\xcf\xb2\xf7\x70\x44\xa4\xd0\x92\xc3\x68\xd8\x8f\x7c\xa5\xc2\x1e\xc9\x6d\xb8\xad\xc0\x2c\x25\x2d\x10\xb6\xc6\xd5\x2e\x8c\x82\x30\xcc\x6c\xa6\x31\x66\xeb\x62\xf0\xf0\x90\x21\xb6\x40\xf9\xb9\xc0\x73\x0e\x93\xc9\xd9\xc4\x9a\xa5\xa3\x0a\x48\xf4\x62\xb3\xd8\xe7\x4f\x5c\xb8\x46\x93\xb3\x80\xdd\x25\xe6\x1c\x7c\x30\xea\x66\x21\x5c\x96\x4c\x24\x6d\x77\x85\xeb\x9a\x89\xf2\x2a\xaa\x41\x38\x66\x95\xdf\xe8\x67\x8d\x03\x83\x8e\x50\xaf\x5c\xd7\x20\x2e\xce\x2e\xb6\x54\x6f\x9a\xb8\x10\xcb\xcf\xbc\x45\x73\xaf\x60\xb8\x7f\x3e\x99\x9c\xc5\x40\x7f\xd6\x37\x52\xc7\x76\x03\x44\xb9\xb8\x79\x90\x35\x10\xec\x63\xc7\xac\x4a\x06\x1a\x8c\xa6\x33\x18\x6d\xe7\xed\xb7\x5a\xc1\x02\x94\x02\xfa\x2e\xf6\xc0\x29\xa1\x15\xec\xa3\x85\xf7\x6e\xb9\x5d\xdd\xa6\xe9\x6d\x42\x85\x19\x4f\x77\xfd\x42\xfc\xde\x94\xd7\xd1\xa0\xd6\x2c\xa5\x62\x9f\x61\x1f\xd6\xbc\x9b\xf2\x8a\x11\x25\xb5\x5c\x18\x29\x38\x13\x2e\x0f\x57\x0e\x85\xa9\x25\x6e\x41\x60\x6f\xc1\xe1\xc8\x03\xfa\x78\xd4\xca\x1d\xee\x9a\x05\x21\x23\x57\x20\x7e\xf2\x89\x5e\x66\xef\x34\x87\x59\x10\xb4\x87\xcd\x4b\xd7\x2b\xa1\x1a\x6b\xfd\x49\x2a\xba\x8d\xd0\x16\x90\x3f\x17\xa0\x8b\x43\x69\x7c\x69\xbc\x26\xb4\x07\xe4\x37\xb7\x53\xbf\x38\x8d\x4a\xee\x81\x74\xcc\xce\x93\xdd\xfa\xfb\x0f\x8b\xd8\x8d\xe8\x9f\x2c\x54\xc3\xd6\x74\xba\x5d\xba\xc2\xf7\x93\x12\x6e\x5c\x2a\xa2\x2e\x93\x35\xc9\x30\x6e\x87\xf0\xab\xb5\x48\x17\xc3\x43\xd4\x87\xcb\xa6\x40\x96\xe9\x40\x97\x6f\x70\xe5\x9e\x85\x87\x4e\xaa\x82\x4b\xe6\x5a\xdf\xba\xe5\x2d\x35\x5e\x3e\x6f\x6a\x90\x16\xe1\xfb\xc8\x9e\x8d\xc7\x83\x1a\x5b\xed\xe0\xda\x8d\x7f\x42\x00\xac\xb7\x3a\xb5\xb9\x94\x46\x1b\x85\xeb\xd0\xc2\x1d\x54\x3e\xf0\x35\x05\x5f\x9b\x71\x2e\xc4\x42\x61\x6d\x94\x25\xc6\xaa\x50\xc1\xd5\x98\xf4\xa6\x5f\xcc\x91\xa4\x3c\x37\x4b\xac\x80\xb6\x23\xd7\x7d\x4c\x83\x5a\xc9\x0f\x40\x92\x34\x11\xfb\x44\x57\x27\xde\xf8\x89\x9b\x54\x05\x12\x92\x42\xa6\x24\x87\xbc\xd7\x46\x8f\x5c\xe7\x6a\x0d\x34\x2d\x55\x14\x36\x0b\xf3\xc9\x2b\xd0\x1a\xb7\xe5\x69\x7f\xef\x16\xaa\xda\xb5\xb7\x6d\xed\x4a\xac\x62\x66\x33\xe1\x5c\x12\xec\x8e\x0c\xef\x91\xe8\x76\x25\x96\xba\x7a\x5c\x8c\x8e\x9b\xcd\x4b\x3c\x07\xae\xa7\xa0\xa6\x41\x78\x81\x9e\x85\x11\x23\xa3\xdb\x7c\x47\xe3\xe6\x27\x3b\x7a\xd5\xfc\x8c\xfc\xea\x40\x49\xeb\x3a\xc5\xce\x06\xda\xce\xa9\xac\xb0\x8b\x0d\xb7\xe7\x57\xd3\xd9\xf5\xbb\xdb\xf3\xd9\xc5\x34\x17\xac\x76\x6d\x7f\xcc\xf7\x13\x42\x5c\x77\xd2\xb1\xf9\x5f\x6b\x04\x70\xce\x5c\xa0\x07\x41\x40\x77\x09\xb1\xc2\x02\x97\x40\xdb\xe9\x6b\xd6\xd8\xda\x7f\xf6\xd3\x6d\xff\xec\xdd\x7a\xcd\xe5\xe6\x1b\x31\xa0\x56\x6c\x8d\x0d\xfc\xb6\x35\x95\xc4\x41\x2b\x57\x26\xfa\xfd\xa6\xcd\xf6\xb1\x22\x12\xc7\xe3\x77\x38\x3c\x8d\x67\xd0\x61\x5a\xda\xb4\x45\xbd\x51\x52\x17\x16\x9a\xa1\xd2\xd7\x9e\xfe\xee\xa4\x69\x4f\x8f\xa5\x7d\x6f\xdf\x0c\x17\xda\x59\x76\x3b\x71\xda\x6e\xb7\x22\x7d\xb8\x5a\x85\xef\x23\xaa\xf4\x85\x78\xcd\x59\xb9\x34\xe1\x69\xb6\x93\xf2\xd8\xf8\xf5\xa3\xcc\x5a\x72\x5b\x25\xf3\x19\xba\x11\xb8\x62\xc4\xc7\x5f\x17\x3e\x98\x28\x43\x19\x44\x63\x86\xf8\x5f\x00\x00\x00\xff\xff\x3e\x98\x14\x40\x65\x1b\x00\x00") func 
masterEtcOriginMasterMasterConfigYamlBytes() ([]byte, error) { return bindataRead( @@ -159,7 +159,7 @@ func masterEtcOriginMasterSchedulerJson() (*asset, error) { return a, nil } -var _masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x16\xa5\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x5a\x85\xda\x5a\x25\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x24\x15\xae\x79\xc9\x48\x0a\x00\x01\x00\x00\xff\xff\x58\x97\xb9\x86\x74\x00\x00\x00") +var _masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x96\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x52\x85\x1a\x85\xc2\xd2\xfc\x92\x54\x85\xda\x5a\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x84\x3a\xd7\xbc\x64\x4c\x65\x80\x00\x00\x00\xff\xff\xc1\xc1\xc9\xa5\x80\x00\x00\x00") func masterEtcOriginMasterSessionSecretsYamlBytes() ([]byte, error) { return bindataRead( @@ -199,7 +199,7 @@ func masterTmpAnsibleAnsibleSh() (*asset, error) { return a, nil } -var _masterTmpAnsibleAzureLocalMasterInventoryYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\x6d\x6f\xdb\xc8\x11\xfe\xee\x5f\x31\x90\x03\xa8\x45\x23\xa9\x49\x0e\x68\x8f\x40\x3e\xe8\x7c\x4c\x6c\x5c\x6c\x19\x92\x5c\x24\x2d\x0a\x62\xc9\x1d\x8a\x5b\xaf\x76\x78\xbb\x4b\xc9\x3a\xc3\xff\xbd\x18\xee\xea\x8d\x7e\x89\x2f\xfa\x24\x8a\x33\xcf\xcc\xce\x3c\xf3\xb2\x1a\x0c\x06\x27\xa7\xf0\xe9\xe2\xeb\x65\x9a\xc0\xd8\x6c\x7c\xa5\xcc\x02\x72\xd4\xb4\x06\x65\x40\x68\x3d\x28\x44\xed\xa0\x12\x0e\x94\xef\x3b\x58\x09\xdd\x20\x58\xac\xb5\x28\x50\x42\xbe\x01\x87\x92\x45\x7d\x85\x27\xa7\x10\x3f\x54\xa3\x71\x95\x2a\xfd\x52\x38\x8f\xd6\x15\x56\xd5\x7e\xe8\x2a\x58\x57\xaa\xa8\x40\x39\x30\xe4\x41\x49\x14\xfa\x2d\xac\x11\x5c\x45\x8d\x96\x50\xaa\x3b\xf0\x95\xf0\xc3\x13\x4d\x85\xd0\x41\x39\x39\x01\xa8\xc8\x79\xc7\x5f\x00\xda\x37\xfc\x7c\x02\xb0\x12\x36\xfe\x2a\x8c\x53\xb9\xc6\xac\x20\x63\xb0\xf0\x8a\x4c\x12\x24\x8f\xde\xd6\x1b\x5f\x91\xc9\x94\xf1\x68\x6b\x8b\x0c\x0e\xa3\xc6\xd9\x51\xae\xcc\x28\xbc\x7c\x7f\x72\x72\x74\x80\x4c\x39\x06\xf5\x42\x19\xb4\xea\x0f\x94\x09\x7c\x12\xda\x61\x47\xaa\xd0\x0a\x8d\xcf\x72\x65\x84\xdd\x1c\x80\x52\xf1\x08\x6f\x29\x16\x98\x79\xb1\x48\xa0\xb7\xfa\x57\x3a\x9d\x5d\x4c\xae\x7a\x41\xe8\x14\xae\x26\xf3\x34\x81\x79\xa5\x1c\x14\x95\x30\x0b\x74\x1c\xda\x0f\xc3\x77\x7f\xff\x1b\x08\x23\x43\x0a\x84\xd6\x50\xd0\x32\x57\xa6\x8d\xbc\x27\x10\xe0\x94\x59\x68\xe4\x80\x28\x91\x6b\x8c\x70\xe1\xd3\x18\x89\x96\xf3\x03\x46\x2c\xf1\xc0\x95\x35\xe6\x7c\x36\x47\x1a\xa3\x5b\x2c\xd0\xf1\xf7\x50\xa8\xb6\x58\xaa\xbb\x04\x7a\x17\x97\xe3\xcf\x69\x76\x3d\x4d\x3f\x5d\x7c\x1d\xf5\x5e\xd0\xd8\xc3\xee\xb4\xe6\xdf\xae\xd3\xc1\x1a\xf3\x41\x94\x79\x49\x7d\x85\xd6\xb5\xb9\xec\x86\x8a\x2c\x2e\xb2\xc6\xea\x2c\x72\x04\xfa\x97\xe3\xd9\x3c\x9d\x66\x93\x69\xfa\x39\xbb\x99\x7e\x19\xbc\xb9\x2f\x68\x59\x93\x41\xe3\x1f\x92\x37\xf7\x11\xe9\xa1\xdf\xb1\x16\xf4\x33\x89\xa5\x68\xb4\xcf\x5c\x93\x4b\x5a\x0a\x65\x12\xe8\xcf\xd3\xcb\xeb\xe9\xe4\x66\x9e\x4e\x2f\xae\x87\x46\xd5\x43\x45\xfd\x6d\xa2\xda\x8a\x89\xdf\xdb\x74\x45\x06\xe7\x08\x7e\x53\xe3\x47\x65\x4a\x2b\xde\x42\xde\x78\xa6\x77\x25\x56\x08\x9e\x40\xab\x15\xc2\x5a\xf9\x0a\x2c\x2e\x14\x99\x20\x06\x25\x59\x30\xb4\x8e\x70\x39\x16\xa2
\x71\x08\x54\x82\xc6\x85\x28\x36\x60\x51\x38\x32\xae\xe3\xb9\xa5\x86\x3d\x77\xa8\xb1\xf0\xc4\x21\x38\x04\xed\x9e\x93\xdf\x39\x6f\x37\x2f\xcb\x9f\xc2\x15\xb5\x65\x09\xeb\x6a\x03\x9e\x0f\xa6\x1c\x08\x90\xaa\x2c\xd1\xa2\xf1\x20\x85\x17\xed\x11\xc3\xe1\x94\x07\xd5\x75\xac\xb6\xb4\x44\x5f\x61\xe3\x32\x43\x12\x0f\x2c\xde\x47\x93\xfd\x04\xfa\xc1\xea\xc3\x36\xa0\x33\xf4\x30\xfe\xa3\xb1\x08\xae\xc6\x42\x95\xaa\x08\xa6\x38\x36\x5c\xed\x28\x61\x7b\x84\x8e\xb9\xf0\xf6\xe0\x80\x9e\x2c\x93\xae\xb6\xb4\x52\x92\xb9\xd1\x13\x0c\x9c\xe5\x9a\xf2\x2e\xd9\x9e\x53\xbe\x55\x46\x26\xd0\xa3\xfc\x7f\x58\xf8\xd7\x2a\xed\xcd\x64\xa2\x28\xa8\x31\x3e\x10\xbf\x3f\x4d\x3f\x5f\xcc\xe6\xd3\x6f\xd9\x6c\x3e\x99\x72\x0d\x8c\xff\x7d\x33\x4d\xb3\xf1\xd9\xd9\xe4\xe6\x6a\x7e\x35\xbe\x4c\xbb\xe9\x7a\xbd\x89\x5b\xdc\x7c\xd7\xc2\x6f\xe9\xb7\x1f\x30\xb0\x6b\x7b\x09\xf4\xb6\x72\x3f\x10\x0a\x8b\x42\x2f\x13\xe8\x15\x64\x71\xb8\x56\x46\xd2\xda\x0d\x0d\xfa\xde\x13\xb5\x14\x7f\x39\x17\x56\x42\x41\x12\x03\x05\x63\x7d\x0c\x8f\x64\xce\x1a\xcb\x84\xd4\x9b\xb6\xb3\x15\xba\xe1\x3a\x06\xe7\x85\x47\x10\x1e\x24\xd6\x9a\x36\x4b\xa6\xac\x57\x4b\x04\x49\x18\x86\x4e\xa8\x45\x6e\x86\x24\xd1\x45\xb0\xe0\x3f\x5a\x94\x5c\xa6\xfc\x36\xf4\x85\x50\xac\x5a\xe4\xa8\x1d\x88\xba\xd6\x0a\x65\xdb\x88\x2d\x0a\xb9\x61\xd9\x1c\xe1\xf7\x06\xad\x42\x19\xa1\xc4\x42\x28\xe3\x3c\xfb\xc0\x38\x35\x29\xe3\xdb\xf9\xc8\x5e\x84\x39\x19\x9d\x6b\x47\x5d\x10\xd2\x62\x93\x13\xdd\x3a\xb0\x8d\x19\xc2\x58\x3b\x7a\x1b\xe1\xf8\x75\xa8\xf4\x76\xe8\xaa\x42\x40\x9b\x76\x88\x1d\x0b\x7a\xa5\xd0\xda\x41\x2e\x8a\xdb\x1e\x3b\xf4\x8e\xfd\xb4\x54\x5b\x25\x3c\xea\x0d\xac\x2b\xb4\x08\xc2\x1d\xe2\xc5\x6c\xed\x10\x35\x2d\xb8\xde\x62\x88\x86\x30\x6f\x75\xd6\x82\x67\x8d\x23\x90\xca\x15\x8d\xe3\xf6\x09\x22\x27\xae\xfa\x32\xa2\x85\x79\xb4\xb7\xc7\x0e\x48\x0a\x59\x8b\x7d\xae\xb5\xe0\xe0\xe3\x47\x08\xcd\xae\x0d\xfb\xae\xc9\x31\x40\xc4\xaa\xd1\x96\x58\x70\x42\x4b\x14\xed\xbc\x66\xb4\x36\x5d\xc2\x1c\x28\xc7\xf3\x57\x6a\x51\xb5\x63\x4d\x6c\x63\xea\x94\xdd\x65\x61\x6b\xf7\x98\x33\x53\x5c\x29\xa7\x3c\x68\xc1\xf1\xfc\x4b\x4d\x8e\xed\x6c\xb8\xc1\x09\x5c\x92\x71\xe8\x81\x2c\xbc\x21\x5f\xa1\xfd\xeb\x33\x54\x0f\x6d\x77\x6b\x20\x81\x77\xdf\x29\x89\x43\xc9\xe7\x3b\x65\x4b\xdc\x04\x7a\xb5\x45\x87\xe6\x51\xcb\xe9\xf6\x54\xbc\xab\xc9\xb2\x1f\x61\xc0\xee\xe6\x64\xff\x7a\x3a\xb9\x4c\xe7\xe7\xe9\xcd\x2c\x4b\xbf\x5e\x4f\xa6\x3c\x13\xe3\xe8\xec\x77\xcd\x1f\x0d\x6a\xe3\xbc\xd0\x3a\x81\xb9\x6d\xf0\x05\x3f\x83\xbd\xd7\x2d\x02\xfb\x1a\xcc\x78\x62\x24\xd0\x1f\x5f\xcd\x2e\x7e\xf9\x92\x66\xbf\xa6\xd7\x5f\x26\xdf\xda\x55\x20\x7a\xb5\x5d\xd1\x1c\xda\x95\x2a\x30\xcb\x2d\xdd\xf2\xf1\x8e\xdc\x3a\x86\xdf\x8a\x16\xc2\x0b\x4d\x8b\x17\x5d\x3b\x58\x3d\xba\x5e\x3e\x0d\xb3\x8f\xe8\x6a\x17\xbe\x27\x37\x07\xf4\x85\xe4\x4d\xc4\x25\xf0\x9f\x5e\xe5\x7d\xed\x92\xd1\xe8\x7c\x32\x6b\xdb\x7a\xf2\xfe\xc3\x3f\x7e\xee\xfd\x37\x1c\xd1\xe3\xb2\x66\xe6\x75\xcf\xf8\x52\x30\x9f\xd3\x79\x6e\xaf\xda\xca\x0f\xa2\xfc\x20\xc8\xbf\x0c\xf6\xec\x96\x75\xb4\x90\x2a\x07\x16\x65\x63\xa4\x30\x3e\x0c\x7f\x8b\xbf\x37\x2a\xf6\xcc\x4a\x18\xc9\x0b\x68\x80\x02\x77\x8b\x6b\xc8\xd1\xaf\x11\xcd\xd1\x32\xba\x8b\xdf\x20\x66\x9c\x7b\xe2\xc4\xaa\x05\xdf\x36\x8c\x84\xc9\xd9\x75\x27\xce\x78\x27\x96\xb5\xc6\xb0\x86\x33\x97\x0e\xdc\x9d\x9d\x4f\xa6\x73\xe6\x77\xaf\x4b\xd9\x82\x8a\xdb\x5a\x6d\x39\x78\x18\xe5\xb3\xc9\xd9\x6f\xd7\x17\xf3\xe7\x48\xfb\x48\x31\x17\x0e\x63\xa0\xb7\xaa\xbf\x8c\x67\x29\xa7\xf7\xbb\xba\x7b\x57\xb7\xaa\x4f\x07\xf8\x57\x6a\x67\x93\xc4\x52\x19\xfc\x6e\xcf\x01\x47\xfb\xd9\xe1\x85\xbb\x85\x52\xed\x76\xfe\x20\x3b\xdc\x2c\x35\x28\xe3\x94\x0c\xc3\xae\x0b\x09\x96\x34\x42\x69\x6
9\xf9\x44\x46\xd6\x4a\xeb\xed\x34\x6b\x3c\xd5\x54\x37\x4c\x1b\x5e\xf6\x1a\xbe\x68\x3c\x89\xd8\xdd\x47\xdb\x6c\xf2\x68\xdc\x28\xb3\x38\x18\x3e\xa6\x59\xe6\x68\x79\xbb\x3d\x98\x07\x5d\xba\xed\x6f\x32\x2e\x5e\x46\x85\x65\xa6\x78\xb4\x46\x68\xe6\xdb\x63\xaf\xd9\xde\x1a\xfb\xf6\xf8\xf2\x13\xb6\xc0\xe8\xf4\x12\xda\xc1\xc6\x37\xa6\x15\x29\xd9\xfa\xa3\x4c\xc1\x9b\x04\xef\x1b\xce\xb3\x5b\xa5\x28\x3c\x94\xca\xc8\xad\xdf\x3b\x53\xe1\xaa\x09\x7c\xf9\x5a\x92\xd9\x3e\xf1\xb3\x29\xd5\xa2\x25\x4a\x02\x23\xf4\xc5\x88\x5a\x42\x8f\x76\x12\xaf\xe4\x70\x10\xde\x5f\x7b\xe3\x11\x9a\x5c\xab\x62\xd7\xa9\x1b\xab\x13\xd8\x75\x9a\xfb\x7b\x18\xa6\x77\x21\x2e\x97\xad\xe2\x39\xb9\x76\xef\x84\x87\x87\xe4\x9f\x3f\xfd\xf4\x61\x74\x74\xcf\x3a\x00\x14\xb5\xfa\xd3\x60\x7b\x90\xd7\xf4\x3d\x38\x8d\xff\x2c\x5c\x11\x68\x32\x0b\xb4\x60\x10\x25\xef\x50\x8e\x43\xfd\x28\x89\x83\x0f\xc3\x9f\x87\xef\xdf\x0f\xde\xc5\x9b\x53\xdf\x62\x5b\x18\x64\x02\xe1\xb7\xed\x65\x83\x3e\x3a\xc2\xf4\xd9\x87\x8a\x9f\xe2\xce\xbd\x75\xa5\x7f\x72\x42\x14\x9b\xb5\xcb\x3c\x65\x21\x5b\xcf\xfe\xab\xf0\xff\x00\x00\x00\xff\xff\x14\x49\xa7\xb3\x12\x11\x00\x00") +var _masterTmpAnsibleAzureLocalMasterInventoryYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\x5d\x6f\xdb\x3a\xd2\xbe\xcf\xaf\x18\x38\x05\xfc\xbe\xd8\xda\xde\xb6\x07\xd8\x3d\x02\x7a\xe1\x93\xa3\x36\xc1\x69\xe2\xc0\x76\x16\xed\x2e\x16\x02\x25\x8e\x2c\x6e\x68\x8e\x4a\x52\x76\x7c\xb2\xf9\xef\x8b\x11\xe9\x2f\xe5\xa3\x41\x7d\x65\x59\x33\xcf\x3c\x9c\x6f\x7a\x30\x18\x9c\x9c\xc2\xa7\x8b\xaf\x97\x69\x02\x63\xb3\xf1\x95\x32\x0b\xc8\x51\xd3\x1a\x94\x01\xa1\xf5\xa0\x10\xb5\x83\x4a\x38\x50\xbe\xef\x60\x25\x74\x83\x60\xb1\xd6\xa2\x40\x09\xf9\x06\x1c\x4a\x16\xf5\x15\x9e\x9c\x42\xfc\x50\x8d\xc6\x55\xaa\xf4\x4b\xe1\x3c\x5a\x57\x58\x55\xfb\xa1\xab\x60\x5d\xa9\xa2\x02\xe5\xc0\x90\x07\x25\x51\xe8\xb7\xb0\x46\x70\x15\x35\x5a\x42\xa9\xee\xc0\x57\xc2\x0f\x4f\x34\x15\x42\x07\xe5\xe4\x04\xa0\x22\xe7\x1d\x7f\x01\x68\xdf\xf0\xf3\x09\xc0\x4a\xd8\xf8\xab\x30\x4e\xe5\x1a\xb3\x82\x8c\xc1\xc2\x2b\x32\x49\x90\x3c\x7a\x5b\x6f\x7c\x45\x26\x53\xc6\xa3\xad\x2d\x32\x38\x8c\x1a\x67\x47\xb9\x32\xa3\xf0\xf2\xfd\xc9\xc9\xd1\x01\x32\xe5\x18\xd4\x0b\x65\xd0\xaa\x3f\x51\x26\xf0\x49\x68\x87\x1d\xa9\x42\x2b\x34\x3e\xcb\x95\x11\x76\x73\x00\x4a\xc5\x23\xbc\xa5\x58\x60\xe6\xc5\x22\x81\xde\xea\x1f\xe9\x74\x76\x31\xb9\xea\x05\xa1\x53\xb8\x9a\xcc\xd3\x04\xe6\x95\x72\x50\x54\xc2\x2c\xd0\xb1\x6b\x3f\x0c\xdf\xfd\xf5\x2f\x20\x8c\x0c\x21\x10\x5a\x43\x41\xcb\x5c\x99\xd6\xf3\x9e\x40\x80\x53\x66\xa1\x91\x1d\xa2\x44\xae\x31\xc2\x85\x4f\x63\x24\x5a\x8e\x0f\x18\xb1\xc4\x03\x2a\x6b\xcc\xf9\x6c\x8e\x34\x46\x5a\x2c\xd0\xe1\x7b\x28\x54\x5b\x2c\xd5\x5d\x02\xbd\x8b\xcb\xf1\xe7\x34\xbb\x9e\xa6\x9f\x2e\xbe\x8e\x7a\x2f\x68\xec\x61\x77\x5a\xf3\x6f\xd7\xe9\x60\x8d\xf9\x20\xca\xbc\xa4\xbe\x42\xeb\xda\x58\x76\x5d\x45\x16\x17\x59\x63\x75\x16\x73\x04\xfa\x97\xe3\xd9\x3c\x9d\x66\x93\x69\xfa\x39\xbb\x99\x7e\x19\xbc\xb9\x2f\x68\x59\x93\x41\xe3\x1f\x92\x37\xf7\x11\xe9\xa1\xdf\xb1\x16\xf4\x33\x89\xa5\x68\xb4\xcf\x5c\x93\x4b\x5a\x0a\x65\x12\xe8\xcf\xd3\xcb\xeb\xe9\xe4\x66\x9e\x4e\x2f\xae\x87\x46\xd5\x43\x45\xfd\x6d\xa0\xda\x8a\x89\xdf\xdb\x70\xc5\x0c\xce\x11\xfc\xa6\xc6\x8f\xca\x94\x56\xbc\x85\xbc\xf1\x9c\xde\x95\x58\x21\x78\x02\xad\x56\x08\x6b\xe5\x2b\xb0\xb8\x50\x64\x82\x18\x94\x64\xc1\xd0\x3a\xc2\xe5\x58\x88\xc6\x21\x50\x09\x1a\x17\xa2\xd8\x80\x45\xe1\xc8\xb8\x0e\x73\x4b\x0d\x33\x77\xa8\xb1\xf0\xc4\x2e\x38\x04\xed\x9e\x93\xdf\x39\x6f\x37\x2f\xcb\x9f\xc2\x15\xb5\x65\x09\xeb\x6a\x03\x9e\x0f\xa6\x1c\x08\x90\xaa\x2c\xd1\xa2\xf1\x20\x85\x17\xed\x11\xc3\xe1\x94\x07\xd5\x25\x56\x
5b\x5a\xa2\xaf\xb0\x71\x99\x21\x89\x07\x16\xef\xa3\xc9\x7e\x02\xfd\x60\xf5\x61\xeb\xd0\x19\x7a\x18\xff\xd9\x58\x04\x57\x63\xa1\x4a\x55\x04\x53\xec\x1b\xae\x76\x94\xb0\x3d\x42\xc7\x5c\x78\x7b\x70\x40\x4f\x96\x93\xae\xb6\xb4\x52\x92\x73\xa3\x27\x18\x38\xcb\x35\xe5\xdd\x64\x7b\x4e\xf9\x56\x19\x99\x40\x8f\xf2\xff\x60\xe1\x5f\xab\xb4\x37\x93\x89\xa2\xa0\xc6\xf8\x90\xf8\xfd\x69\xfa\xf9\x62\x36\x9f\x7e\xcb\x66\xf3\xc9\x94\x6b\x60\xfc\xcf\x9b\x69\x9a\x8d\xcf\xce\x26\x37\x57\xf3\xab\xf1\x65\xda\x0d\xd7\xeb\x4d\xdc\xe2\xe6\x87\x16\xfe\x48\xbf\xfd\x84\x81\x5d\xdb\x4b\xa0\xb7\x95\xfb\x09\x57\x58\x14\x7a\x99\x40\xaf\x20\x8b\xc3\xb5\x32\x92\xd6\x6e\x68\xd0\xf7\x9e\xa8\xa5\xf8\xcb\xb9\xb0\x12\x0a\x92\x18\x52\x30\xd6\xc7\xf0\x48\xe6\xac\xb1\x9c\x90\x7a\xd3\x76\xb6\x42\x37\x5c\xc7\xe0\xbc\xf0\x08\xc2\x83\xc4\x5a\xd3\x66\xc9\x29\xeb\xd5\x12\x41\x12\x86\xa1\x13\x6a\x91\x9b\x21\x49\x74\x11\x2c\xf0\x47\x8b\x92\xcb\x94\xdf\x86\xbe\x10\x8a\x55\x8b\x1c\xb5\x03\x51\xd7\x5a\xa1\x6c\x1b\xb1\x45\x21\x37\x2c\x9b\x23\x7c\x6f\xd0\x2a\x94\x11\x4a\x2c\x84\x32\xce\x33\x07\xc6\xa9\x49\x19\xdf\xce\x47\x66\x11\xe6\x64\x24\xd7\x8e\xba\x20\xa4\xc5\x26\x27\xba\x75\x60\x1b\x33\x84\xb1\x76\xf4\x36\xc2\xf1\xeb\x50\xe9\xed\xd0\x55\x85\x80\x36\xec\x10\x3b\x16\xf4\x4a\xa1\xb5\x83\x5c\x14\xb7\x3d\x26\xf4\x8e\x79\x5a\xaa\xad\x12\x1e\xf5\x06\xd6\x15\x5a\x04\xe1\x0e\xf1\x62\xb4\x76\x88\x9a\x16\x5c\x6f\xd1\x45\x43\x98\xb7\x3a\x6b\xc1\xb3\xc6\x11\x48\xe5\x8a\xc6\x71\xfb\x04\x91\x13\x57\x7d\x19\xd1\xc2\x3c\xda\xdb\x63\x02\x92\x42\xd4\x62\x9f\x6b\x2d\x38\xf8\xf8\x11\x42\xb3\x6b\xdd\xbe\x6b\x72\x0c\x10\xb1\x6a\xb4\x25\x16\x1c\xd0\x12\x45\x3b\xaf\x19\xad\x0d\x97\x30\x07\xca\xf1\xfc\x95\x5a\x54\xed\x58\x13\x5b\x9f\x3a\x65\x77\x51\xd8\xda\x3d\xce\x99\x29\xae\x94\x53\x1e\xb4\x60\x7f\xfe\x5f\x4d\x8e\xed\x6c\xb8\xc1\x09\x5c\x92\x71\xe8\x81\x2c\xbc\x21\x5f\xa1\xfd\xff\x67\x52\x3d\xb4\xdd\xad\x81\x04\xde\xfd\xa0\x24\x0e\x25\x9f\xef\x94\x6d\xe2\x26\xd0\xab\x2d\x3a\x34\x8f\x5a\x4e\xb7\xa7\xe2\x5d\x4d\x96\x79\x84\x01\xbb\x9b\x93\xfd\xeb\xe9\xe4\x32\x9d\x9f\xa7\x37\xb3\x2c\xfd\x7a\x3d\x99\xf2\x4c\x8c\xa3\xb3\xdf\x35\x7f\x34\xa8\x8d\xf3\x42\xeb\x04\xe6\xb6\xc1\x17\x78\x06\x7b\xaf\x5b\x04\xf6\x35\x98\xf1\xc4\x48\xa0\x3f\xbe\x9a\x5d\xfc\xf6\x25\xcd\x7e\x4f\xaf\xbf\x4c\xbe\xb5\xab\x40\x64\xb5\x5d\xd1\x1c\xda\x95\x2a\x30\xcb\x2d\xdd\xf2\xf1\x8e\x68\x1d\xc3\x6f\x45\x0b\xe1\x85\xa6\xc5\x8b\xd4\x0e\x56\x8f\x2e\xcb\xa7\x61\xf6\x1e\x5d\xed\xdc\xf7\xe4\xe6\x80\xbe\x90\xbc\x89\xb8\x04\xfe\xd5\xab\xbc\xaf\x5d\x32\x1a\x9d\x4f\x66\x6d\x5b\x4f\xde\x7f\xf8\xdb\xaf\xbd\x7f\x87\x23\x7a\x5c\xd6\x9c\x79\xdd\x33\xbe\xe4\xcc\xe7\x74\x9e\xdb\xab\xb6\xf2\x83\x28\x3f\x08\xf2\x2f\x83\x3d\xbb\x65\x1d\x2d\xa4\xca\x81\x45\xd9\x18\x29\x8c\x0f\xc3\xdf\xe2\xf7\x46\xc5\x9e\x59\x09\x23\x79\x01\x0d\x50\xe0\x6e\x71\x0d\x39\xfa\x35\xa2\x39\x5a\x46\x77\xfe\x1b\xc4\x88\x73\x4f\x9c\x58\xb5\xe0\xdb\x86\x91\x30\x39\xbb\xee\xf8\x19\xef\xc4\xb2\xd6\x18\xd6\x70\xce\xa5\x03\xba\xb3\xf3\xc9\x74\xce\xf9\xdd\xeb\xa6\x6c\x41\xc5\x6d\xad\xb6\x39\x78\xe8\xe5\xb3\xc9\xd9\x1f\xd7\x17\xf3\xe7\x92\xf6\x91\x62\x2e\x1c\x46\x47\x6f\x55\x7f\x1b\xcf\x52\x0e\xef\x0f\x75\xf7\x54\xb7\xaa\x4f\x3b\xf8\x77\x6a\x67\x93\xc4\x52\x19\xfc\x61\xcf\x01\x47\xfb\xd9\xe1\x85\xbb\x85\x52\xed\x76\xfe\x20\x3b\xdc\x2c\x35\x28\xe3\x94\x0c\xc3\xae\x0b\x09\x96\x34\x42\x69\x69\xf9\x44\x44\xd6\x4a\xeb\xed\x34\x6b\x3c\xd5\x54\x37\x9c\x36\xbc\xec\x35\x7c\xd1\x78\x12\xb1\xbb\x8f\xb6\xd1\xe4\xd1\xb8\x51\x66\x71\x30\x7c\x4c\xb3\xcc\xd1\xf2\x76\x7b\x30\x0f\xba\xe9\xb6\xbf\xc9\xb8\x78\x19\x15\x96\x33\xc5\xa3\x35\x42\x73\xbe\x3d\x66\xcd\xf6\xd6\xd8\
xb7\xc7\x97\x9f\xb0\x05\x46\xd2\x4b\x68\x07\x1b\xdf\x98\x56\xa4\x64\xcb\x47\x99\x82\x37\x09\xde\x37\x9c\x67\x5a\xa5\x28\x3c\x94\xca\xc8\x2d\xef\x9d\xa9\x70\xd5\x04\xbe\x7c\x2d\xc9\x6c\x9f\xf8\xd9\x94\x6a\xd1\x26\x4a\x02\x23\xf4\xc5\x88\xda\x84\x1e\xed\x24\x5e\x99\xc3\x41\x78\x7f\xed\x8d\x47\x68\x72\xad\x8a\x5d\xa7\x6e\xac\x4e\xe0\xfe\x1e\x6a\xcb\x9b\xc5\xae\xe5\xf4\x60\x98\xde\x05\xff\x5c\xb6\x00\xe7\xe4\xda\xfd\x13\x7a\xc9\xdf\x7f\xf9\xe5\xc3\x68\x7b\xdd\x82\xff\xc2\xf7\x86\x3c\xc2\xc3\x43\xd7\x84\xa8\xd5\xcf\xc2\x3f\x09\xfb\x9a\xde\x08\xa7\xf1\xdf\x87\x2b\x02\x4d\x66\x81\x16\x0c\xa2\xe4\x3d\xcb\x71\x38\x1e\x05\x7a\xf0\x61\xf8\xeb\xf0\xfd\xfb\xc1\xbb\x78\xbb\xea\x5b\x6c\x8b\x87\x4c\x28\x8a\x6d\x0b\xda\xa0\x8f\x44\x38\xc5\xf6\xee\xe4\xa7\xb8\x97\x6f\xa9\xf4\x4f\x4e\x88\x62\x43\x77\x99\xa7\x2c\x44\xf4\xd9\x7f\x1e\xfe\x17\x00\x00\xff\xff\x14\x17\x6b\xa6\x36\x11\x00\x00") func masterTmpAnsibleAzureLocalMasterInventoryYmlBytes() ([]byte, error) { return bindataRead( @@ -299,7 +299,7 @@ func masterTmpBootstrapconfigsMasterConfigYaml() (*asset, error) { return a, nil } -var _nodeEtcAzureAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xd0\x51\x0a\x82\x40\x10\x06\xe0\xf7\x4e\xe1\x09\x3c\x40\x6f\x8b\x42\x04\x11\x91\x5e\x60\x5c\xa7\x18\x58\x77\x65\x9c\x0d\x4c\xbc\x7b\xac\xfa\x60\xb9\x45\xcf\xff\xf7\xff\xec\x8e\xa0\x05\x2b\xc7\x7a\x9f\x0c\x43\x92\xaa\xa7\x67\xcc\x9c\xbd\xd1\x3d\x2d\xe7\x24\x4f\xc6\x71\xd7\xf9\xaa\xd3\x4c\xad\x90\xb3\x31\x5b\xac\xf3\xa9\x01\x50\x67\x86\x30\x3e\xad\x54\xbe\x84\xef\xb6\x40\xcd\x28\x3f\xfc\x0c\x96\x4e\xf9\xd7\xd3\x19\x3b\xe7\x59\xe3\x81\x9d\x6f\xb7\xf4\xba\x8e\x83\x37\x4e\x43\xf8\xc6\x96\x9e\x96\x64\x3a\x08\x6a\xcf\x24\xfd\x54\x3b\x43\x83\x91\x9b\x7c\x92\xd0\x6b\x99\x1a\xe0\x5e\x3d\x80\x0c\x54\x64\x48\xfa\x02\x25\x3e\x70\xf9\x6a\xc3\xd2\x2b\x00\x00\xff\xff\x0d\x19\xb3\x4a\xb9\x01\x00\x00") +var _nodeEtcAzureAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x90\x51\x8a\x83\x30\x10\x86\xdf\xf7\x14\x39\x81\x07\xd8\xb7\xa0\xb0\x2c\x2c\xcb\xb2\x7a\x81\x31\x4e\xcb\x40\x4c\xec\x38\x29\x58\xeb\xdd\x4b\xab\x0f\x51\xd3\x42\x9f\xbf\xff\xfb\x87\xf9\x05\x1d\x38\xf9\x6e\x3e\xd5\x38\xaa\x4c\x5f\x02\x63\xee\xdd\x81\x8e\x59\x35\x93\x42\x5d\xd5\x29\x78\x41\x35\x4d\x1f\x7d\xa8\x7b\xc3\xd4\x09\x79\x97\x72\xca\x98\xaf\x4c\x80\x26\xb7\x84\xe9\x53\x5a\x17\x0b\x4c\x3b\x25\x1a\x46\x79\xe1\xcd\x81\x8d\x5b\xbd\xf5\x1a\x63\xef\x03\x1b\xfc\x62\x1f\xba\xbd\xf2\x1f\xe3\xd8\xb3\xde\xc0\xfd\xdd\xbd\xf2\xb3\x90\xd5\x80\x68\x02\x93\x0c\x8f\x9a\x5f\x68\x31\xb1\xe1\x36\x12\xfb\x1d\x53\x0b\x3c\xe8\x33\x90\x85\x9a\x2c\xc9\x50\xa2\xa4\x8b\xfe\x9e\x66\xe3\xc6\x5b\x00\x00\x00\xff\xff\x34\x7d\xff\xaa\x01\x02\x00\x00") func nodeEtcAzureAzureConfBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf b/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf index 4762d60b51..c148e25cc3 100644 --- a/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf +++ b/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf @@ -1,26 +1,26 @@ -ETCD_NAME={{ .Master.Hostname }} -ETCD_LISTEN_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_NAME={{ .Master.Hostname | shellQuote }} +ETCD_LISTEN_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_DATA_DIR=/var/lib/etcd/ #ETCD_WAL_DIR="" #ETCD_SNAPSHOT_COUNT=10000 ETCD_HEARTBEAT_INTERVAL=500 ETCD_ELECTION_TIMEOUT=2500 -ETCD_LISTEN_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 
+ETCD_LISTEN_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_MAX_SNAPSHOTS=5 #ETCD_MAX_WALS=5 #ETCD_CORS= #[cluster] -ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 -ETCD_INITIAL_CLUSTER={{ .Master.Hostname }}=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} +ETCD_INITIAL_CLUSTER={{ print .Master.Hostname "=https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_INITIAL_CLUSTER_STATE=new ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster-1 #ETCD_DISCOVERY= #ETCD_DISCOVERY_SRV= #ETCD_DISCOVERY_FALLBACK=proxy #ETCD_DISCOVERY_PROXY= -ETCD_ADVERTISE_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 +ETCD_ADVERTISE_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_STRICT_RECONFIG_CHECK="false" #ETCD_AUTO_COMPACTION_RETENTION="0" #ETCD_ENABLE_V2="true" diff --git a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml index 80bd0df15d..d522e1d843 100644 --- a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml +++ b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml @@ -60,13 +60,13 @@ controllers: '*' corsAllowedOrigins: - (?i)//127\.0\.0\.1(:|\z) - (?i)//localhost(:|\z) -- (?i)//{{ QuoteMeta (index .Master.IPs 0).String }}(:|\z) +- {{ print "(?i)//" (QuoteMeta (index .Master.IPs 0).String) "(:|\\z)" | quote }} - (?i)//kubernetes\.default(:|\z) - (?i)//kubernetes\.default\.svc\.cluster\.local(:|\z) - (?i)//kubernetes(:|\z) -- (?i)//{{ QuoteMeta .ExternalMasterHostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .ExternalMasterHostname) "(:|\\z)" | quote }} - (?i)//openshift\.default(:|\z) -- (?i)//{{ QuoteMeta .Master.Hostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .Master.Hostname) "(:|\\z)" | quote }} - (?i)//openshift\.default\.svc(:|\z) - (?i)//kubernetes\.default\.svc(:|\z) - (?i)//172\.30\.0\.1(:|\z) @@ -80,7 +80,7 @@ etcdClientInfo: certFile: master.etcd-client.crt keyFile: master.etcd-client.key urls: - - https://{{ .Master.Hostname }}:2379 + - {{ print "https://" .Master.Hostname ":2379" | quote }} etcdStorageConfig: kubernetesStoragePrefix: kubernetes.io kubernetesStorageVersion: v1 @@ -119,7 +119,7 @@ kubernetesMasterConfig: cloud-config: - "/etc/azure/azure.conf" masterCount: 1 - masterIP: {{ (index .Master.IPs 0).String }} + masterIP: {{ (index .Master.IPs 0).String | quote }} podEvictionTimeout: null proxyClientInfo: certFile: master.proxy-client.crt @@ -142,7 +142,7 @@ masterClients: contentType: application/vnd.kubernetes.protobuf qps: 300 openshiftLoopbackKubeConfig: openshift-master.kubeconfig -masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} +masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} networkConfig: clusterNetworkCIDR: 10.128.0.0/14 clusterNetworks: @@ -154,7 +154,7 @@ networkConfig: networkPluginName: redhat/openshift-ovs-subnet serviceNetworkCIDR: 172.30.0.0/16 oauthConfig: - assetPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}/console/ + assetPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port "/console/" | quote }} grantConfig: method: auto identityProviders: @@ -166,8 +166,8 @@ oauthConfig: provider: apiVersion: v1 kind: OpenIDIdentityProvider - 
clientID: {{ .AzureConfig.AADClientID }} - clientSecret: {{ .AzureConfig.AADClientSecret }} + clientID: {{ .AzureConfig.AADClientID | quote }} + clientSecret: {{ .AzureConfig.AADClientSecret | quote }} claims: id: - sub @@ -178,8 +178,8 @@ oauthConfig: email: - email urls: - authorize: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/authorize - token: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/token + authorize: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/authorize" | quote }} + token: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/token" | quote }} {{- end}} - name: Local password challenge: true @@ -190,8 +190,8 @@ oauthConfig: file: /etc/origin/master/htpasswd kind: HTPasswdPasswordIdentityProvider masterCA: ca-bundle.crt - masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} - masterURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} + masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} + masterURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} sessionConfig: sessionMaxAgeSeconds: 3600 sessionName: ssn @@ -225,7 +225,7 @@ serviceAccountConfig: publicKeyFiles: - serviceaccounts.public.key servingInfo: - bindAddress: 0.0.0.0:{{ .Master.Port }} + bindAddress: {{ print "0.0.0.0:" .Master.Port | quote }} bindNetwork: tcp4 certFile: master.server.crt clientCA: ca.crt diff --git a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml index e958ea1d0f..d440e83292 100644 --- a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml +++ b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml @@ -1,5 +1,5 @@ apiVersion: v1 kind: SessionSecrets secrets: -- authentication: "{{ .AuthSecret }}" - encryption: "{{ .EncSecret }}" +- authentication: {{ .AuthSecret | quote }} + encryption: {{ .EncSecret | quote }} diff --git a/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml b/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml index 56f8c5d5a0..a98b70883f 100644 --- a/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml +++ b/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml @@ -92,8 +92,8 @@ localmaster: config_base: /etc/origin/ examples_content_version: "vSHORT_VER" master: - public_console_url: "https://{{ .ExternalMasterHostname }}:8443/console" - public_api_url: "https://{{ .ExternalMasterHostname }}:8443" + public_console_url: {{ print "https://" .ExternalMasterHostname ":8443/console" | quote }} + public_api_url: {{ print "https://" .ExternalMasterHostname ":8443" | quote }} etcd_urls: ["https://HOSTNAME:2379"] #FIXME: No longer needed as of openshift-ansible-3.9.22-1 but we're not on that version yet node: nodename: 'HOSTNAME' diff --git a/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf b/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf index fa3d075114..870b2aad1d 100644 --- a/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf +++ b/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf @@ -1,9 +1,9 @@ -tenantId: {{ .AzureConfig.TenantID }} -subscriptionId: {{ 
.AzureConfig.SubscriptionID }} -aadClientId: {{ .AzureConfig.AADClientID }} -aadClientSecret: {{ .AzureConfig.AADClientSecret }} -aadTenantId: {{ .AzureConfig.TenantID }} -resourceGroup: {{ .AzureConfig.ResourceGroup }} -location: {{ .AzureConfig.Location }} -securityGroupName: {{ .AzureConfig.SecurityGroupName }} -primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName }} +tenantId: {{ .AzureConfig.TenantID | quote }} +subscriptionId: {{ .AzureConfig.SubscriptionID | quote }} +aadClientId: {{ .AzureConfig.AADClientID | quote }} +aadClientSecret: {{ .AzureConfig.AADClientSecret | quote }} +aadTenantId: {{ .AzureConfig.TenantID | quote }} +resourceGroup: {{ .AzureConfig.ResourceGroup | quote }} +location: {{ .AzureConfig.Location | quote }} +securityGroupName: {{ .AzureConfig.SecurityGroupName | quote }} +primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName | quote }} diff --git a/pkg/openshift/certgen/unstable/files.go b/pkg/openshift/certgen/unstable/files.go index b86cc84c71..6fd5525989 100644 --- a/pkg/openshift/certgen/unstable/files.go +++ b/pkg/openshift/certgen/unstable/files.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "os" "regexp" + "strconv" "strings" "text/template" @@ -112,6 +113,10 @@ func (c *Config) WriteMasterFiles(fs filesystem.Writer) error { h, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) return string(h), err }, + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, }).Parse(string(tb)) if err != nil { return err @@ -146,6 +151,10 @@ func (c *Config) WriteNodeFiles(fs filesystem.Writer) error { t, err := template.New("template").Funcs(template.FuncMap{ "QuoteMeta": regexp.QuoteMeta, + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, }).Parse(string(tb)) if err != nil { return err diff --git a/pkg/openshift/certgen/unstable/templates/bindata.go b/pkg/openshift/certgen/unstable/templates/bindata.go index e1b0f4a9f7..ed0ded62b7 100644 --- a/pkg/openshift/certgen/unstable/templates/bindata.go +++ b/pkg/openshift/certgen/unstable/templates/bindata.go @@ -78,7 +78,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _masterEtcEtcdEtcdConf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x53\x5d\x6f\xea\x38\x10\x7d\xcf\xaf\x40\xe6\x65\xf7\xa1\x90\xd2\xaf\xa5\x92\x1f\x8c\x33\x80\x85\x49\xb2\xb6\x03\x45\x55\x65\x51\x1a\x68\xb4\x14\x50\x12\xba\xad\x10\xff\xfd\xca\x49\x20\xb4\xd0\xab\xab\xdb\xfb\x96\xcc\x39\x33\x3e\x33\x73\x06\x14\x75\xb4\x4b\xfa\x80\x37\x9b\x4a\xad\x3f\x4e\xd2\x30\xae\x75\x97\x49\xba\x18\xbf\x84\x95\xed\xd6\xca\x08\x9c\x49\x05\xae\xf6\x01\x84\x0e\x04\x97\xf8\x39\x4d\x57\xc9\x6d\xbd\xbe\xd9\x54\xfe\x8a\x16\x4f\xe1\xdb\x3e\x97\xf9\x49\xc5\xfe\xbb\x26\xd3\x38\x5a\xcc\x2a\xdb\xed\x6d\xe3\xe2\x1f\x3b\xaf\xe2\x10\x45\xb4\xc3\x04\xae\xbf\x8e\xe3\xfa\x3c\x7a\xac\x87\xe9\xe4\xa9\x6e\x55\x33\x74\x48\x78\x06\x22\x54\x04\xa4\x4b\x7c\xd9\xf5\x94\xa6\x5e\xe0\x2a\x7c\x6e\xdb\x76\x51\xa8\x0b\x44\xa8\x16\x10\xa5\x99\xab\x40\x0c\x08\xc7\x57\x3b\x0c\x38\x50\xc5\x3c\x57\x2b\xd6\x07\x2f\x50\xb8\xb1\x87\x8a\x2e\x28\x67\xe0\xaa\xdf\xe8\xe3\xa6\x59\x28\xeb\x93\xbb\xbd\x3a\x89\xaf\x0e\xa2\x43\xc2\xcb\x00\xf5\x84\xc4\x96\x65\x55\xef\x27\xf3\xb5\x29\xfa\x90\xeb\x60\x2e\x53\x8c\x70\x4d\x9c\x01\x08\xc5\x24\x7c\x77\xb0\xbb\x82\x94\x07\x52\x81\xf8\x62\x95\x7f\xa4\xb6\x96\x8a\x28\xc0\x8b\xf0\xff\xd3\xb0\xf2\x7a\xe0\x62\xb3\xd7\xb3\xa2\xe9\xb3\xf3\x62\x1c\x0e\x93\xd4\x1b\x80\x18\xe1\xcf\x01\x2d\xc5\xe0\x38\xd8\x26\x9c\xb7\x08\xed\xe1\x55\xbc\x7c\x7b\x3f\x82\x7d\xe1\xdd\x8d\x70\xae\xa2\x9c\xe4\xf7\x97\x2b\x95\x60\x54\x69\x01\xd4\x73\xdb\xac\xa3\x69\x17\x68\x0f\xa3\xe9\x78\x9e\x84\x3b\x6b\x92\x40\x79\x9a\x7a\x7d\x9f\xe4\x5e\x13\xa0\xc0\x35\x5f\x18\xd9\x3b\x0e\xb8\xa4\xc5\x41\x0f\x1a\x18\xa5\xf1\x3a\x44\xb9\xd2\x7f\x03\x4f\x11\x6d\xda\x02\xd7\xd1\xad\x91\x02\x89\x2f\x1b\xcd\xcb\xe6\xf5\x4d\xa3\x79\x6d\xbc\x92\x75\xfb\x50\x14\xc9\x9b\x5c\x4e\xa7\x87\xff\xba\x4d\x18\x0f\x04\xe8\x21\x61\x0a\xa3\x2b\xdb\xde\x3f\x9a\xe3\x02\xda\x02\x64\xb7\xbc\x0e\x74\x61\x1f\x91\x1c\xb3\xb6\xdd\x91\xa0\xf3\x23\x7c\x28\x98\x82\x92\x70\xea\x15\xe2\x94\xb8\x8d\x8c\xf8\x24\x9c\xac\xe3\x28\x7d\x2f\x9c\xae\x84\x71\x85\xa3\x29\xd1\x6d\xc6\x01\x9b\x83\xcf\x8f\x7e\x32\xae\x4d\xe2\x34\x67\x15\x3b\xa3\x20\x94\x99\x6c\xf7\xc3\xc0\xb2\xe8\xa7\xe4\x24\x8c\x5f\xc3\xb8\x2c\xd0\x83\xd1\x17\x94\xff\xc2\xf7\xc3\x95\x29\x2e\xf7\x9b\xcc\x3b\x31\x87\xf7\x6b\x2a\x33\xea\x4f\xa5\xe6\x8c\x13\x7a\x57\xe1\xa1\xda\x8c\x76\x42\x72\xc6\x2a\x05\x67\xb4\x23\xd5\x56\xf5\x7e\xbe\x9c\xcd\xa2\xc5\xac\x18\xb1\x03\xad\xa0\x83\x51\xfb\xa0\x29\xee\x75\xb4\x4f\x68\x8f\x74\x40\x73\x18\x80\x49\x37\x2f\xe4\x33\xc1\x59\x06\x2a\xac\x36\x8d\xe6\x59\xad\x0f\x9e\xf5\x7d\xe1\xb5\x3f\x59\xbe\x0f\xe6\x2e\x24\x46\x8f\xe3\x24\x9a\x20\xab\x6a\x55\xef\xc7\xeb\xf4\xf9\xa1\x9c\x6f\xb7\xb8\x7e\x94\x44\x2f\xab\x79\x88\xac\x1f\x01\x00\x00\xff\xff\x20\x54\xc1\x6d\x5d\x06\x00\x00") +var _masterEtcEtcdEtcdConf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x54\x5d\x6f\xea\x38\x10\x7d\xcf\xaf\x88\xcc\xcb\xee\xc3\x85\x94\xdb\x8f\xa5\x92\x1f\x8c\x33\x80\x85\x49\x52\xdb\x81\xa2\xaa\xb2\x28\x0d\x34\xda\x14\x50\x12\xba\xad\xba\xfd\xef\x2b\x27\x81\xd0\xc2\xae\x56\xba\xea\x1b\xcc\x1c\xcf\x9c\x39\x73\x26\xa0\xa8\xab\x3d\x32\x02\xfc\xfe\x6e\x37\x47\xb3\x2c\x8f\xd2\xe6\x60\x9d\xe5\xab\xd9\x73\x64\xff\x6d\x67\x4f\x51\x92\xdc\x6c\xd7\x79\x64\x7f\x7c\x58\x05\x9a\x33\xa9\xc0\xd3\x01\x80\xd0\xa1\xe0\xd2\xbc\xdc\xa4\xf1\x2a\xb7\xd1\x53\x9e\x6f\xb2\xeb\x56\x0b\xd9\xbf\xc5\xab\xc7\xe8\x75\x5f\x91\x05\x99\xed\xfc\xde\x94\x79\x1a\xaf\x96\x36\xba\x6e\xff\xfc\xc3\x41\xa7\xcb\xbb\x44\x11\xed\x32\x81\x5b\x2f\xb3\xb4\x95\xc4\x0f\xad\x28\x9f\x3f\xb6\xac\x46\x91\x9d\x10\x5e\x24\x11\xaa\x02\xd2\x23\x81\x1c\xf8\x4a\x53\x3f\xf4\x14\x3e\x73\x1c\xc7\x29\x0b\x0d\x80\x08\xd5\x05\xa2\x34\xf3\x14\x88\x31\xe1\xf8\x62\x97\x03\x0e\x54\x31\xdf\xd3\x8a\x8d\xc0\x0f\x15\x6e\xef\x53\xd5\x78\x94\x33\xf0\xd4\x2f\x0c\x78\xd5\x39\x1e\xb0\xa4\x3c\x22\xb7\x7b\xda\x12\x5f\x1c\x44\x27\x84\xd7\x01\xea\x0b\x89\x2d\xcb\x6a\xdc\xcd\x93\xad\x69\x72\x5f\x12\x64\x1e\x53\x8c\x70\x4d\xdc\x31\x08\xc5\x24\x7c\xdb\x2a\x76\x9d\x28\x0f\xa5\x02\x51\x57\x3f\x32\x0a\xc2\xdf\xd0\x4f\x4b\x45\x14\xe0\x55\xf4\xd7\xe9\xb4\xf2\x87\xe0\x61\xe3\x8e\x1f\x95\x42\x3f\xce\x2a\xed\x5c\x26\xa9\x3f\x06\x31\xc5\x5f\x03\x5a\x8a\xf1\x71\xb0\x47\x38\xef\x12\x3a\xc4\x9b\x74\xfd\xfa\x76\x94\x0e\x84\x7f\x3b\xc5\x25\x8b\x5a\xf6\x6f\xb4\x88\x54\x82\x51\xa5\x05\x50\xdf\xeb\xb1\xbe\xa6\x03\xa0\x43\x8c\x16\xb3\x24\x8b\x76\xce\x27\xa1\xf2\x35\xf5\x47\x01\x29\xad\x2c\x40\x81\x67\x7e\x61\xe4\xec\x30\xe0\x91\x2e\x07\x3d\x6e\x63\x94\xa7\xdb\x08\x95\x23\xdc\x84\xbe\x22\xda\xcc\x0b\x9e\xab\xbb\x53\x05\x12\x9f\xb7\x3b\xe7\x9d\xcb\xab\x76\xe7\xd2\x38\xae\x90\xe1\xbe\x2a\x52\x4e\xbf\x5e\x2c\x0e\xff\xeb\x1e\x61\x3c\x14\xa0\x27\x84\x29\x8c\x2e\x1c\x67\xdf\xb4\xcc\x0b\xe8\x09\x90\x83\xfa\xf8\xd0\x4f\xe7\x08\xe4\x9a\x7d\xee\x6e\x10\x9d\x1d\xe5\x27\x82\x29\xa8\x01\xa7\xba\x10\xb7\xce\x3b\xc8\x90\xcf\xa2\xf9\x36\x8d\xf3\xb7\xea\x5e\x94\x30\x76\x71\x35\x25\xba\xc7\x38\x60\xf3\x3d\x29\xbf\x29\xf3\x59\x73\x9e\xe6\x25\xaa\x5a\x26\x05\xa1\x8c\xb2\x83\x4f\x82\x15\xd1\x2f\x8f\xb3\x28\x7d\x89\xd2\xba\xc0\x10\xa6\xff\x02\xf9\x33\x7a\x3b\x5c\x99\xe2\x72\xbf\xc9\x72\x12\x73\xbe\xff\x8f\x65\x01\xfd\x4f\xaa\x25\xe2\x04\xdf\x4d\x74\xc8\xb6\x80\x9d\xa0\x5c\xa0\x6a\xc2\x05\xec\x88\xb5\xd5\xb8\x4b\xd6\xcb\x65\xbc\x5a\x56\x12\xbb\xd0\x0d\xfb\x18\xf5\x0e\x86\xe2\x7e\x5f\x07\x84\x0e\x49\x1f\x34\x87\x31\x98\xe7\xa6\x43\xa9\x09\x2e\x5e\xa0\xca\x6a\x8b\x38\x29\x6a\x7d\xf2\x6c\x10\x08\xbf\xf7\xc5\xf2\x23\x30\x77\x21\x31\x7a\x98\x65\xf1\x1c\x59\x0d\xab\x71\x37\xdb\xe6\x4f\xf7\xb5\xbe\x83\xea\xb3\x80\xb2\xf8\x79\x93\x44\xc8\xfa\x27\x00\x00\xff\xff\x6b\x4e\x0b\x40\xe2\x06\x00\x00") func masterEtcEtcdEtcdConfBytes() ([]byte, error) { return bindataRead( @@ -118,7 +118,7 @@ func masterEtcOriginMasterHtpasswd() (*asset, error) { return a, nil } -var _masterEtcOriginMasterMasterConfigYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x6b\x6f\x1b\xbb\xd1\xfe\xae\x5f\x41\x04\x07\xc8\xc9\x8b\x77\x77\x25\x3b\xd7\x05\x8a\x42\xb5\x9d\x13\xe1\xd8\xb1\x2a\x3b\x45\x81\xba\x08\x28\x72\xb4\x62\xc4\x25\x37\xbc\x28\x56\xdc\xfc\xf7\x82\x97\xbd\x4a\x4a\xd2\x9c\xa0\x75\x02\xc4\x4b\x3e\x33\x1c\x72\x1e\xce\x85\xc1\xb4\x64\x5a\x33\x29\xce\xa4\x58\xb1\x22\x1f\x21\x54\x71\x5b\xb0\xce\x37\x42\x7f\xb1\x8c\xd3\x73\x58\x61\xcb\x8d\x0e\x43\x08\x11\x0f\xb0\x0a\x1b\x26\x45\x3d\x88\x10\xae\xd8\xdf\x40\x39\x8d\x39\xda\x4e\x9a\x61\x10\xdb\x1c\xfd\xe3\x9f\xcd\xf7\x86\x09\x9a\xf7\x15\x87\x15\x1b\x84\x02\x2d\xad\x22\xa0\x5b\xdd\x08\x71\x56\x32\xa3\x73\xf4\xf0\xa5\x33\xa8\xe0\xa3\x05\xdd\x19\xf6\x6a\xaf\xb7\xa0\x14\xa3\xf0\x83\x06\x77\x0c\x6c\x34\x75\x2c\x9c\x4b\x3a\x57\xa0\xc1\xfc\x98\x76\xca\x34\x5e\x72\xc8\xd1\x0a\x73\x0d\x83\x45\xe3\x81\x4c\xfb\xae\xf1\x20\x59\x81\xd0\x6b\xb6\x32\x29\x93\xd9\xac\xc4\x05\xcc\x25\x67\x64\xf7\x83\x4e\xb9\x07\x62\x1d\x72\x61\x79\xf7\x9c\x13\x54\x62\x43\xd6\x5e\xff\x54\x08\x69\xbc\xba\x9e\x23\x12\xb4\x81\x5d\x8e\x98\x83\xe8\xb4\x67\x16\x05\xb1\x4b\x1a\xd5\x1d\x19\x84\xb6\x98\x5b\xc8\xd1\x63\xa3\x2c\x3c\xee\xcc\x08\x5c\x42\xde\x9a\x93\x50\x10\x0c\x68\x07\x20\xc5\xe2\x10\x1d\x92\x86\x25\x39\xaa\x24\xd5\x47\xa6\x96\xce\x8b\xba\xc7\x98\x0f\x40\x4c\x8e\x9c\x1d\x9d\x61\xbd\x61\xd5\xb5\x5f\x89\x7b\x3b\x5e\x63\xc6\xad\x82\x01\x2e\x38\xa9\x73\xf8\xd1\x3f\xb8\x28\x14\x14\xd8\x48\xd5\xb9\x4b\x4a\xde\xef\xce\x38\x03\x61\x66\x62\x25\x83\xed\x04\x94\x79\xcd\x9c\xf7\x5b\x91\x64\xa5\xa4\x30\x89\xc7\xa7\x44\x19\x0f\xdc\xc0\xee\xab\xb8\x0d\xec\x46\xb8\x62\x97\xb0\x05\xae\xf3\x51\xe2\x7c\x3b\x70\x35\xb6\x66\xdd\x9a\x13\x6f\xca\x1b\xc0\x14\x54\x34\xc6\x1b\x77\x36\xcd\x51\x47\x73\x42\x70\x63\x44\x04\xc8\xb2\x94\xe2\x2d\x2e\x6b\x07\x24\x47\x8c\x1a\x05\x62\x19\x85\xc3\x2a\x73\x05\x2b\x76\xdf\x4a\xfd\x3d\x59\x40\x29\x0d\x24\x17\x0e\x93\xf8\xd1\x42\x49\x5b\x05\xf8\x3e\xee\x37\x37\xe9\x07\xad\x06\xe5\x98\x72\x0c\xf9\x4e\x83\x1a\x11\x29\x8c\x92\x9c\x43\xc7\x0b\xc0\x81\xb4\x17\x82\x4b\xb2\x79\xeb\x09\xd7\xd0\x36\x29\xb1\x36\xa0\x92\x56\xd8\xb1\x45\x83\xda\x32\x02\x37\xee\x1f\x51\x9c\x81\x8a\x97\x5d\xb3\x42\xd4\xc7\xd7\xf5\x66\xc4\x27\x61\xbe\x39\xc0\x8e\x1f\x07\x08\xe7\xbf\xce\x92\x39\x7a\xfc\x7f\x8f\x47\x44\x2a\x3d\xe5\x5c\x7e\x02\x7a\xad\x58\xc1\x84\xf7\xec\xaf\x7f\x66\x4f\xb2\x6c\x72\xf2\xe2\x2e\x1d\xfb\xbf\x93\x5f\xf3\x7f\xdd\x7d\x7e\xd2\x4c\x71\x49\x30\x5f\x4b\x6d\x06\xe3\x0f\x0f\xe8\xaf\x56\x1a\xb8\x02\x83\xd1\xaf\x4c\x50\xb8\x47\xe9\x95\xdf\x6e\x3a\x9b\x6b\x34\x7e\x92\xde\x18\xc5\x44\x81\xbe\x7c\x19\x88\x6e\xec\x12\x94\x00\x03\xfa\x2e\xa5\x21\x26\x7d\x1b\x71\x97\xea\x2d\xb9\x4b\x09\xb7\x6e\x89\xbb\xd4\xdb\x75\x54\xec\x6b\xc6\xa6\x17\xf7\xc6\x39\x9c\x07\x6b\xdf\x48\x6d\x9c\xf7\xf7\xed\x6c\xdc\x78\xcc\xcc\xbe\xda\xb8\xf9\xff\x44\x9f\xdf\xd4\x77\xee\x7d\x00\x9b\xbc\x38\xb9\x4b\x4f\x0f\xfb\xec\xc8\x42\xdf\x38\xbd\x46\x2a\x8e\x53\xa1\x5b\xaa\x2f\x99\xa0\x53\x4a\x15\x68\x9d\xa3\x71\xea\xff\xe4\x2f\xc7\xcf\x4e\xe3\xdc\x5b\x30\x9f\xa4\xda\xe4\xc8\x90\xea\xe9\x08\x0c\xa1\xfd\xe8\x44\x70\x8e\xc2\x65\x48\xdd\x64\x1b\x08\x5a\x9a\xf7\xa6\xbd\x70\x84\x34\x34\x3f\x80\x70\x54\x47\xc8\x2a\xee\xaf\x6d\x82\xd6\xc6\x54\x3a\xf7\xae\x39\xe0\x90\xfc\xe4\xf4\xc5\x2b\x6f\xdd\x8d\x91\x0a\x17\xd0\x6e\xb0\x3d\xf6\x38\x15\x02\x4c\xde\x99\x48\x99\x3c\x04\xec\x67\x40\x77\x8a\x37\xee\x14\x07\x6a\xba\xa9\xec\x00\xac\xab\xc4\x27\xbf\xd6\xb2\x95\x54\x25\x36\x39\xba\x9a\xde\xdc\x5e\x2c\xde\x5f\x2f\x2e\x7e\x7b\xff\x6e\x71\x99\xfc\xf2\x40\x64\x59\x49\x01\xc2\x7c\xc9\x7f\x79\xd8\x06\x0d\xae\x52\xe1\xd8\x80\x36\x75\x11\xc0\x8
6\x19\xc5\x29\x65\x22\xdc\x81\x05\x14\x4c\x1b\xb5\xab\x0f\x29\x47\x54\x92\x0d\xa8\x44\xc5\x89\x9a\x41\x8e\x40\xf9\xb3\xf1\x78\x3c\x0a\x79\x2a\x1c\x6e\x4c\x51\xee\x4c\x38\x98\x7d\x97\x13\x9c\x2c\xad\xa0\x1c\x8e\x79\x3b\x4a\x7e\xdd\xe1\x03\x50\xf0\x79\x25\x95\xc9\xd1\x64\x7c\xf2\x6c\x3c\x6a\x7d\xd2\x35\xcb\x19\x81\x2b\xe6\xe2\x2c\xa8\xa9\x2a\x6c\x09\xa2\xae\x33\x95\x15\x86\x95\x90\x90\x4e\x39\x9a\x38\xb4\xce\x34\x18\xc3\x44\xa1\xd3\xcd\x4b\xe7\xf2\x6c\x3b\xc1\xbc\x5a\xe3\xc9\x9f\x9a\x6c\xad\x83\xcf\x92\x25\x26\x1b\x10\xb4\x96\x76\xbc\x3a\xed\x01\x4a\xa0\x0c\x27\x66\x57\x41\xbb\x42\xc5\x19\xf1\x75\x4f\xb6\x15\x34\xed\xb0\xab\x52\xd2\xc8\xa5\x5d\xc5\xec\x28\x2d\x75\x99\x6f\xcb\x9a\x94\x9a\xa0\x47\xf8\xb3\x55\xf0\xa8\x83\xe8\xdb\xff\x28\x03\x43\x32\xe9\xc3\x7b\xe6\x01\xb5\x86\xcc\x4b\xa6\x0e\xee\xc4\xdb\xec\x30\x38\x97\x18\x21\x7c\x26\x61\xa2\x48\x9c\xc3\x92\x95\x73\xc6\xa1\x25\x82\x7f\xb2\x70\x99\x1f\x1d\x54\xb0\x81\xdd\xf7\xc8\x6f\x60\xf7\xe8\xbf\xb1\xf1\x32\xf2\xc3\x0a\x47\x9e\x66\x60\x36\xcf\xd1\xc3\xc3\xb7\xf2\x98\x67\x1d\xbd\xd8\x32\x9f\xf5\x6f\x59\x09\xd2\x9a\x1c\x09\xcb\xf9\xb7\x6b\xb2\xc8\xe5\x58\x07\x75\xe9\xbe\x4f\xf8\x1e\x28\xd0\x5d\x93\x35\x50\xdb\x73\x58\xbd\x70\x33\x15\x68\x1f\x34\x1d\x38\xe6\x06\x97\x7e\xd0\xbe\x80\x8e\x65\x83\x7e\x2b\x29\xcc\xa5\x32\x0b\x2c\x0a\x57\x46\x3f\xee\xcc\xdd\xd8\xa5\x00\x77\x56\x2f\x4e\xd2\x53\x1f\xf6\xb3\xc9\x73\x37\xef\x8a\x77\xe2\x24\x43\xf1\xe6\x9a\xb0\x78\xb8\xde\x6c\xcf\x27\x88\x99\xf6\xf7\x86\xe5\x67\xb1\xea\x13\x22\x94\x4e\x83\x6e\x0a\x13\x02\x95\x9b\x36\x20\xcc\xed\xae\x72\x8a\xbf\xe3\xca\xfc\x7f\x17\x13\x37\x87\xd0\xd2\x2a\x17\x05\x9f\x8e\xc7\xa3\xd8\xc3\xd4\x5a\xbf\x4b\xa9\x17\xfa\x58\xe9\x1c\x9d\x78\x0d\xfb\x9b\x71\xbf\xc5\x50\x13\x0e\xad\x09\xf2\x97\x52\x56\x2e\x3a\xfc\x0f\xb6\xfb\xfc\x0f\x6f\xf7\xd4\x6b\xd8\xdb\x4b\x77\xb7\xc3\x0a\xd7\x2b\x0c\x57\x32\xb2\x60\x6e\x97\x9c\x91\x77\x8b\xcb\xbc\x97\x93\x8f\xd6\x5e\x79\x27\x63\x3b\x2e\xba\xeb\x26\x42\x51\xd1\x46\xf3\x18\x5c\x62\xb1\x71\x36\x3b\x5f\xb8\x0c\x90\x4e\x4e\x5e\x06\x62\x3e\xdd\xc3\xc4\xb2\x80\x30\xaa\xf6\xa1\x08\xb9\xc2\x36\x30\xfc\x12\x44\x61\xd6\x39\x7a\xd5\xf1\xf4\x6c\xde\x59\x29\x6a\x8a\xb5\x4f\xe6\x8e\xe8\xb0\x74\xb4\x7a\xee\x9f\x3b\x42\x43\xa0\x80\xae\xb1\x69\x2b\xac\x44\x6e\x75\xa2\xbd\x64\x7b\xd5\xfa\xbb\xea\x5d\x37\xd9\x6f\xb7\xb0\xd6\x60\x7e\xc2\x01\x67\x44\x0a\x2d\x39\x64\x23\xd7\x2b\x61\x4f\xd4\x26\xa8\x96\x60\xd6\x92\xe6\x08\x5b\xe3\x0a\x16\x46\x41\x18\x66\x76\xf3\x18\x57\x75\x3e\x7a\x78\x48\x10\x5b\xa1\xf4\x42\xe0\x25\x87\xe9\xf4\x7c\x6a\xcd\xda\xa1\x02\xd1\x7c\xbc\x4c\x62\x0f\x3e\x75\x51\x18\x4d\xcf\x03\x35\xd7\x98\x73\xf0\xb1\xa6\x7d\xa7\xe0\xb2\x60\xa2\xd3\x12\x97\xb8\xaa\x98\x28\xae\xa2\x19\x84\x63\x56\xfa\x89\x7e\x6e\x38\xf2\x08\x11\x8a\x94\xeb\x0a\xc4\xec\x7c\x36\x30\xbd\x6e\xb0\x42\xa8\x3e\xf7\x91\x3f\xf5\x06\x86\xfd\xa7\xd3\xe9\x79\x8c\xe3\xe7\x21\xea\xb7\xf0\x1b\x20\xca\x85\xc3\xa3\x22\x01\xd0\x15\xc3\xac\xec\x3c\x2e\x30\xda\x7d\x0f\xd1\x76\xd9\x7c\x55\x0a\x56\xa0\x14\xd0\x77\xb1\x1f\xed\x02\xad\x60\x1f\x2d\xbc\x77\xc3\xcd\xe8\x10\xd3\x9b\x84\x12\x33\xde\x9d\xf5\x03\xf1\xbb\xae\x97\xe3\x01\x5a\xb3\x96\x8a\x7d\x86\x96\x49\xde\x19\x69\xc9\x88\x92\x5a\xae\x8c\x14\x9c\x09\x97\x44\xcb\x6c\xb8\xf1\x5b\x10\x38\x1e\x54\xe6\x69\x7a\x92\x35\xfa\x9a\x15\x8c\xdc\x80\xf8\x49\xda\xbd\x2e\xcf\x3d\x10\xb4\xc7\xb1\x4b\xd7\xd4\xa0\x0a\x6b\xfd\x49\x2a\x3a\x64\x5a\x43\xac\x9f\x4b\xb4\xd5\xb1\x6c\xbb\x36\xde\x12\xda\x23\xe4\x9b\xdb\xb9\x1f\x9c\x47\x23\x0f\x50\x33\x26\xd1\xe9\x7e\xf1\xfc\xf3\x02\x6b\xad\xeb\x8f\x6a\xd1\x30\x78\xe3\x6d\x86\xae\xf0\xfd\xb4\x80\x1b\x97\x13\xa8\x4b\x29\x
75\x56\x8a\xd3\x21\x2c\x6a\x2d\xba\x83\xe1\xea\xe8\xe3\xf5\x4b\x80\x25\x3a\xe0\xd2\x1d\x2e\x1d\xa1\x3d\x21\xba\x26\xb8\xac\xaa\xf5\xad\x1b\x1e\x98\xf1\xf2\x79\x5d\x0c\x34\x1c\x3d\x04\x7b\x36\x1e\x8f\x2a\x6c\xb5\x63\x61\xfb\x88\x12\x42\x55\x35\x68\xa4\x96\x52\x1a\x6d\x14\xae\x42\x87\x75\xd4\xf8\x20\x57\x57\x5e\x4d\x26\x98\x89\x95\xc2\xda\x28\x4b\x8c\x55\xa1\x94\xaa\x30\xe9\xbd\x21\x31\x07\xe9\xca\xdc\xac\xb1\x02\xda\x3c\x5c\x1e\x12\x1a\x55\x4a\x7e\x00\xd2\x09\xe8\xb1\x8d\x73\x05\xdb\x8d\x7f\xb7\x92\x2a\x47\x42\x52\x48\x94\xe4\x90\xf6\xba\xdc\xcc\x35\x96\xd6\x40\xdd\xf1\x44\x65\x8b\xf0\xca\x77\x05\x5a\xe3\xa6\x4e\xec\xcf\xdd\x42\x59\xb9\xee\xb3\x29\x22\x89\x55\xcc\xec\xa6\x9c\x4b\x82\xdd\x92\xe1\xc6\x11\xdd\x8c\xc4\x9a\x53\x8f\xf3\xec\xa4\x9e\xbc\xc4\x4b\xe0\x7a\x0e\x6a\x1e\x94\xe7\xe8\x59\x78\xa8\x63\x74\x28\x37\x19\xd7\x3f\xc9\xe4\x55\xfd\x93\xf9\xd1\x91\x92\xd6\x35\x72\xed\x19\x68\xbb\xa4\xb2\xc4\xee\xf6\xdf\x5e\x5c\xcd\x17\xd7\xef\x6e\x2f\x16\xb3\x79\x2a\x58\xe5\xba\xf2\x98\x87\xa7\x84\xb8\xf6\xa0\x15\xf3\xff\x39\x10\xc8\xb9\x70\x21\x1a\x04\x01\xdd\xa6\xae\x12\x0b\x5c\x00\x6d\xde\x30\x93\xfa\xac\xfd\xef\xfe\x8d\xd8\x5f\x6c\x37\x5e\x71\xb9\xfb\xc6\x2d\xaf\x14\xdb\x62\x03\xbf\x0f\xde\xf6\x70\xb0\xca\xd5\x6b\x7e\xbe\xee\x82\x7d\x34\x88\xe0\xb8\xfc\x9e\x84\xc7\x78\x01\x1d\xde\x1c\xeb\xfe\xe4\xe0\xab\xce\xc1\x9b\xbe\xff\xc6\x73\xa0\xb7\xd1\xbe\xd3\xae\x5b\xfd\xe6\x01\xb8\x79\xeb\x19\xb6\x39\x11\x1f\x76\x52\xe2\xfb\x48\x22\x3d\x13\xaf\x39\x2b\xd6\x26\xdc\xc4\xe6\x79\x39\x36\x5c\xfd\xa0\xb2\x95\xdc\x96\x9d\xd7\x12\xba\x13\xb8\x64\xc4\x07\x54\x17\x2d\x98\x28\x42\x7d\x42\x63\xc8\xff\x77\x00\x00\x00\xff\xff\x95\x6c\x5f\xfe\x9a\x1a\x00\x00") +var _masterEtcOriginMasterMasterConfigYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x79\x6f\x1b\xbb\x11\xff\x5f\x9f\x82\x10\x1e\x90\xa4\xe8\xae\x64\x3b\xe7\x02\x45\xa1\xda\xce\x8b\xf1\xec\x58\x95\x9d\xa2\x40\x5d\x04\x14\x39\x5a\x31\xe2\x92\x1b\x1e\x8a\x15\xbf\x7c\xf7\x82\xc7\xee\x72\x75\x24\x41\x9a\xf6\x39\x01\xe2\x25\x67\x86\xc3\x99\x1f\xe7\x0a\xa6\x15\xd3\x9a\x49\x71\x2a\xc5\x82\x95\xc5\x00\xa1\x9a\xdb\x92\x25\xdf\x08\xfd\xcd\x32\x4e\xcf\x60\x81\x2d\x37\x3a\x2c\x21\x44\x3c\x81\x55\xd8\x30\x29\x9a\x45\x84\x70\xcd\xfe\x01\xca\x49\x2c\xd0\xfa\xa8\x5d\x06\xb1\x2e\xd0\xbf\xfe\xdd\x7e\xaf\x98\xa0\x45\x5f\x70\x38\xb1\xa5\x50\xa0\xa5\x55\x04\x74\x27\x1b\x21\xce\x2a\x66\x74\x81\x1e\xbe\x24\x8b\x0a\x3e\x5a\xd0\xc9\xb2\x17\x7b\xbd\x06\xa5\x18\x85\x1f\x54\x38\x51\xb0\x95\x94\x68\x38\x95\x74\xaa\x40\x83\xf9\x31\xe9\x94\x69\x3c\xe7\x50\xa0\x05\xe6\x1a\xb6\x0e\x8d\x06\x99\xf4\x5d\xe3\x89\x64\x0d\x42\x2f\xd9\xc2\xe4\x4c\x8e\x2e\x2a\x5c\xc2\x54\x72\x46\x36\x3f\xe8\x94\x7b\x20\xd6\x51\xce\x2c\x4f\xed\x9c\xa1\x0a\x1b\xb2\xf4\xf2\x27\x42\x48\xe3\xc5\xf5\x1c\x91\xa1\x15\x6c\x0a\xc4\x1c\x89\xce\x7b\x6a\x51\x10\x9b\xac\x15\x9d\xf0\x20\xb4\xc6\xdc\x42\x81\x1e\x19\x65\xe1\x51\xb2\x23\x70\x05\x45\xa7\x4e\x46\x41\x30\xa0\x09\x81\x14\xb3\x7d\x70\xc8\x5a\x94\x14\xa8\x96\x54\x1f\xd8\x9a\x3b\x2f\xea\x1e\x62\x3e\x00\x31\x05\x72\x7a\x24\xcb\x7a\xc5\xea\x6b\x7f\x12\xf7\x7a\xbc\xc6\x8c\x5b\x05\x5b\x74\xc1\x49\x89\xf1\xa3\x7f\x70\x59\x2a\x28\xb1\x91\x2a\x79\x4b\x4a\xde\x6f\x4e\x39\x03\x61\x2e\xc4\x42\x06\xdd\x09\x28\xf3\x9a\x39\xef\x77\x2c\xd9\x42\x49\x61\x32\x4f\x9f\x13\x65\x3c\xe1\x0a\x36\x5f\xa5\x5b\xc1\x66\x80\x6b\x76\x09\x6b\xe0\xba\x18\x64\xce\xb7\x5b\xae\xc6\xd6\x2c\x3b\x75\xe2\x4b\x79\x03\x98\x82\x8a\xca\x78\xe5\x4e\x27\x05\x4a\x24\x67\x04\xb7\x4a\x44\x02\x59\x55\x52\xbc\xc5\x55\xe3\x80\xec\x80\x52\x83\x00\x2c\xa3\x70\x38\x65\xaa\x60\xc1\xee\x3b\xae\x7f\x66\x33\xa8\xa4
\x81\xec\xdc\xd1\x64\x7e\xb5\x54\xd2\xd6\x81\x7c\x97\xee\x57\xb7\xe9\x17\xad\x06\xe5\x90\x72\x88\xf2\x9d\x06\x35\x20\x52\x18\x25\x39\x87\xc4\x0b\xc0\x81\x74\x0f\x82\x4b\xb2\x7a\xeb\x01\xd7\xc2\x36\xab\xb0\x36\xa0\xb2\x8e\xd9\xa1\x45\x83\x5a\x33\x02\x37\xee\x1f\x51\x9e\x82\x8a\x8f\x5d\xb3\x52\x34\xe6\x4b\xbd\x19\xe9\xb3\xb0\xdf\x1a\x30\xf1\xe3\x16\x85\xf3\x5f\x72\x64\x81\x1e\xfd\xe9\xd1\x80\x48\xa5\x27\x9c\xcb\x4f\x40\xaf\x15\x2b\x99\xf0\x9e\x7d\xfc\x57\xf6\x64\x34\x3a\x3a\x7e\x71\x97\x8f\xfd\xdf\xa3\xc7\xc5\xef\x77\x9f\x9f\xb4\x5b\x5c\x12\xcc\x97\x52\x9b\x76\xfd\xe1\x01\xd5\x8a\x09\x83\x86\x81\x62\x88\x1e\xff\xdd\x4a\x03\x57\x60\x30\x7a\xcc\x04\x85\x7b\x94\x5f\xf9\x8b\xe7\x17\x53\x8d\xc6\x4f\xf2\x1b\xa3\x98\x28\x9f\xa0\xa1\x13\x72\xf7\xf9\xc9\x10\xfd\x8e\x3e\x3a\x1e\xf4\xe5\x4b\x7b\xd2\xca\xce\x41\x09\x30\xa0\xef\x72\x1a\x42\xd5\x96\x2e\x7b\x28\xee\x72\xbd\x26\x77\x39\xe1\xd6\x9d\x77\x97\x7b\x75\x0f\xb2\x7d\xdf\x1d\xf2\xf3\x7b\xe3\x10\xc1\xc3\x25\xde\x48\x6d\x1c\x3c\xbe\xa1\x7e\xeb\xf4\x5d\xed\xbf\x7e\x5a\x34\xd5\x0f\x1f\xe3\x4d\xf0\x9d\x96\xda\x22\x3b\x7a\x71\x7c\x97\x9f\xec\x77\xfc\x81\x83\xbe\x61\xeb\x96\x2b\xae\x53\xa1\xbb\xf7\x32\x67\x82\x4e\x28\x55\xa0\x75\x81\xc6\xb9\xff\x53\xbc\x1c\x3f\x3b\x89\x7b\x6f\xc1\x7c\x92\x6a\x55\x20\x43\xea\xa7\x03\x30\x84\xf6\x43\x1c\xc1\x05\x0a\x2f\x2a\x77\x9b\x5d\x34\xe9\xde\x4a\x6f\xdb\x33\x47\x92\xf6\xad\xec\xa1\x70\xef\x05\x21\xab\xb8\x7f\xfb\xa9\xbf\x96\xc6\xd4\xba\x70\x1e\xdb\x76\x13\x1a\x16\xc7\x27\x2f\x5e\xf5\x7c\xe4\x64\xde\x18\xa9\x70\x09\xdd\xa5\x3b\x57\xc4\xad\x10\xb9\x8a\x64\x23\x67\x72\x1f\x61\x3f\xb5\x3a\xcb\xde\x38\xcb\x6e\x89\x49\x73\xe4\x1e\xb2\x54\x88\xcf\xaa\x9d\x66\x0b\xa9\x2a\x6c\x0a\x74\x35\xb9\xb9\x3d\x9f\xbd\xbf\x9e\x9d\xff\xfa\xfe\xdd\xec\x32\xfb\xe5\x81\xc8\xaa\x96\x02\x84\xf9\x52\xfc\xf2\xb0\x0e\x12\x5c\x09\xc4\xb1\x01\x6d\x9a\xea\x82\x6d\xa7\x2a\x27\x94\x89\xf0\x76\x66\x50\x32\x6d\xd4\xa6\xb1\x57\x81\xa8\x24\x2b\x50\x99\x8a\x1b\x0d\xaa\x1c\xa8\x8a\x67\xe3\xf1\x78\x10\x12\x60\xb0\x73\xcc\x7d\xce\x26\x1c\xcc\x2e\x0c\x08\xce\xe6\x56\x50\x0e\x87\x10\x10\x39\xbf\x0e\x82\x2d\xa2\x80\x83\x5a\x2a\x53\xa0\xa3\xf1\xf1\xb3\xf1\xa0\xf3\x49\xaa\x96\x53\x02\xd7\xcc\x05\x70\x50\x13\x55\xda\x0a\x44\x53\xc0\x2a\x2b\x0c\xab\x20\x23\x49\x9d\x9b\x39\x6a\x3d\xd2\x60\x0c\x13\xa5\xce\x57\x2f\x9d\xcb\x47\xeb\x23\xcc\xeb\x25\x3e\xfa\x4b\x5b\x06\xe8\xe0\xb3\x6c\x8e\xc9\x0a\x04\x6d\xb8\x1d\xae\x4e\x7a\x04\x15\x50\x86\x33\xb3\xa9\xa1\x3b\xa1\xe6\x8c\xf8\x82\x6a\xb4\x16\x34\x4f\xd0\x55\x2b\x69\xe4\xdc\x2e\x62\xda\x95\x96\xba\x94\xba\x66\x6d\xae\xce\xd0\x10\x7f\xb6\x0a\x86\x09\x45\x5f\xff\xe1\x08\x0c\x19\x49\x9f\x37\x46\x9e\xa0\x91\x30\xf2\x9c\xb9\x23\x77\xec\x5d\xda\xd9\xb2\x4b\x8c\x1a\x3e\x45\x31\x51\x66\xce\x61\xd9\xc2\x39\x63\xdf\x11\xc1\x3f\xa3\xf0\xc0\x87\x7b\x05\xac\x60\xf3\x3d\xfc\x2b\xd8\x0c\xff\x1f\x17\xaf\x22\x3e\xac\x70\xe0\x69\x17\x2e\xa6\x85\x0b\x27\x5f\x4b\x8b\x69\x04\x71\xe8\xa3\xe7\x6b\xe6\xcb\x8a\x5b\x56\x81\xb4\xa6\x40\xc2\x72\xfe\xed\xa2\x2f\x62\x3a\x16\x5a\x29\xec\x77\x81\xdf\x23\x0a\xb0\xd7\x64\x09\xd4\xf6\x1c\xd7\x1c\xdc\x6e\x05\xf8\x07\x49\x7b\xcc\xdd\xd2\xe5\x1f\xb4\xaf\xd0\x63\x5d\xa2\xdf\x4a\x0a\x53\xa9\xcc\x0c\x8b\xd2\xd5\xe9\x8f\x92\xbd\x1b\x3b\x17\xe0\x6c\xf6\xe2\x38\x3f\xf1\x29\x61\x74\xf4\xdc\xed\xbb\xee\x80\x38\xce\x50\x1d\xba\x2e\x2f\x1a\xd9\xab\xed\x71\x05\x31\x53\xff\xd6\xa2\xfd\x34\x96\x95\x42\x84\xda\x6c\xab\x5d\xc3\x84\x40\xed\xb6\x0d\x08\x73\xbb\xa9\x9d\xe0\xef\x78\x3a\x7f\x4e\x69\xe2\xe5\x10\x9a\x5b\xe5\xa2\xe1\xd3\xf1\x78\x10\x9b\xa4\x46\xea\x77\x09\xf5\x4c\x1f\x6b\x5d\xa0\x63\x2f\x61\xf7\x32\xee\xb7\x18\x72\x82\xd
1\xda\x60\x7f\x29\x65\xed\xa2\xc4\x1f\x70\xdd\xe7\xff\xf5\x75\x4f\xbc\x84\x9d\xbb\xa4\xb7\xdd\x2e\xa1\xbd\xc0\xf0\x34\x23\x0a\xa6\x76\xce\x19\x79\x37\xbb\x2c\xf6\xe7\xeb\xfd\x45\x1c\x1a\x16\x5d\x2e\x77\x90\x4c\x5f\x9f\x08\xf5\x47\x17\xe4\x63\xcc\x89\x75\xc9\xe9\xc5\xd9\xcc\x25\x86\xfc\xe8\xf8\x65\xc0\xe9\xd3\x1d\x9a\x58\x41\x10\x46\xd5\x2e\x29\x42\xae\x90\x0e\x80\xbf\x04\x51\x9a\x65\x81\x5e\x25\x8e\xbf\x98\x26\x27\x45\x49\xb1\x4c\x1a\x39\x8b\xed\xe7\x8e\x5a\x4f\xfd\x78\x25\x34\x20\x0a\xe8\x12\x9b\xae\x18\xcb\xe4\x5a\x67\xda\x73\x76\x2f\xaf\x7f\xab\xde\xeb\x93\xfd\xf6\x0e\x6b\x0d\xe6\x67\xd9\x7b\x38\x22\x52\x68\xc9\x61\x34\xec\x47\xbe\x52\x61\x8f\xe4\x36\xfa\x56\x60\x96\x92\x16\x08\x5b\xe3\x2a\x1b\x46\x41\x18\x66\x36\xd3\x18\x80\x75\x31\x78\x78\xc8\x10\x5b\xa0\xfc\x5c\xe0\x39\x87\xc9\xe4\x6c\x62\xcd\xd2\x51\x05\x24\x7a\xb1\x59\x9c\x02\x4c\x5c\xb8\x46\x93\xb3\x80\xdd\x25\xe6\x1c\x7c\x30\xea\x26\x25\x5c\x96\x4c\x24\x4d\x79\x85\xeb\x9a\x89\xf2\x2a\xaa\x41\x38\x66\x95\xdf\xe8\x27\x91\x03\x63\x90\x50\xcd\x5c\xd7\x20\x2e\xce\x2e\xb6\x54\x6f\x5a\xbc\x10\xcb\xcf\xbc\x45\x73\xaf\x60\xb8\x7f\x3e\x99\x9c\xc5\x40\x7f\xd6\x37\x52\xc7\x76\x03\x44\xb9\xb8\x79\x90\x35\x10\xec\x63\xc7\xac\x4a\xc6\x1d\x8c\xa6\x13\x1a\x6d\xe7\xed\x57\xad\x60\x01\x4a\x01\x7d\x17\x3b\xe4\x94\xd0\x0a\xf6\xd1\xc2\x7b\xb7\xdc\xae\x6e\xd3\xf4\x36\xa1\xc2\x8c\xa7\xbb\x7e\x21\x7e\x37\xc5\x77\x34\xa8\x35\x4b\xa9\xd8\x67\xd8\x87\x35\xef\xa6\xbc\x62\x44\x49\x2d\x17\x46\x0a\xce\x84\xcb\xc3\x95\x43\x61\x6a\x89\x5b\x10\xd8\x5b\x70\x38\xf2\x80\x3e\x1e\xb5\x72\x87\xbb\x66\x41\xc8\xc8\x15\x88\x9f\x7c\xa2\x97\xd9\x3b\xcd\x61\x16\x04\xed\x61\xf3\xd2\x75\x52\xa8\xc6\x5a\x7f\x92\x8a\x6e\x23\xb4\x05\xe4\xcf\x05\xe8\xe2\x50\x1a\x5f\x1a\xaf\x09\xed\x01\xf9\xcd\xed\xd4\x2f\x4e\xa3\x92\x7b\x20\x1d\xb3\xf3\x64\xb7\x3a\xff\x9f\x45\xec\x46\xf4\x4f\x16\xaa\x61\x6b\x76\xdd\x2e\x5d\xe1\xfb\x49\x09\x37\x2e\x15\x51\x97\xc9\x9a\x64\x18\xb7\x43\xf8\xd5\x5a\xa4\x8b\xe1\x21\xea\xc3\x65\x53\x20\xcb\x74\xa0\xcb\x37\xb8\x72\xcf\xc2\x43\x27\x55\xc1\x25\x73\xad\x6f\xdd\xf2\x96\x1a\x2f\x9f\x37\x35\x48\x8b\xf0\x7d\x64\xcf\xc6\xe3\x41\x8d\xad\x76\x70\xed\x86\x43\x21\x00\xd6\x5b\x7d\xdc\x5c\x4a\xa3\x8d\xc2\x75\x68\xf0\x0e\x2a\x1f\xf8\x9a\x82\xaf\xcd\x38\x17\x62\xa1\xb0\x36\xca\x12\x63\x55\xa8\xe0\x6a\x4c\x7a\xb3\x31\xe6\x48\x52\x9e\x9b\x25\x56\x40\xdb\x81\xec\x3e\xa6\x41\xad\xe4\x07\x20\x49\x9a\x88\x5d\xa4\xab\x13\x6f\xfc\x3c\x4e\xaa\x02\x09\x49\x21\x53\x92\x43\xde\x6b\xb2\x47\xae\xaf\xb5\x06\x9a\x86\x2b\x0a\x9b\x85\xe9\xe5\x15\x68\x8d\xdb\xf2\xb4\xbf\x77\x0b\x55\xed\x9a\xdf\xb6\x76\x25\x56\x31\xb3\x99\x70\x2e\x09\x76\x47\x86\xf7\x48\x74\xbb\x12\x4b\x5d\x3d\x2e\x46\xc7\xcd\xe6\x25\x9e\x03\xd7\x53\x50\xd3\x20\xbc\x40\xcf\xc2\x00\x92\xd1\x6d\xbe\xa3\x71\xf3\x93\x1d\xbd\x6a\x7e\x46\x7e\x75\xa0\xa4\x75\x7d\x64\x67\x03\x6d\xe7\x54\x56\xd8\xc5\x86\xdb\xf3\xab\xe9\xec\xfa\xdd\xed\xf9\xec\x62\x9a\x0b\x56\xe7\x4c\x0e\x62\xbe\x9f\x10\xe2\xba\x93\x8e\xcd\xff\xa7\x47\x00\xe7\xcc\x05\x7a\x10\x04\x74\x97\x10\x2b\x2c\x70\x09\xb4\x9d\xcd\x66\x8d\xad\xfd\xef\x7e\xf6\xed\x9f\xbd\x5b\xaf\xb9\xdc\x7c\x23\x06\xd4\x8a\xad\xb1\x81\xdf\xb6\x66\x96\x38\x68\xe5\xca\x44\xbf\xdf\x34\xe1\x3e\x56\x44\xe2\x78\xfc\x0e\x87\xa7\xf1\x0c\x3a\xcc\x52\x9b\xb6\xa8\x37\x68\xea\xc2\x42\x33\x72\xfa\xda\xd3\xdf\x9d\x43\xed\xe9\xb1\xb4\xef\xfc\x9b\xd1\x43\x3b\xe9\x6e\xe7\x51\xdb\xed\x56\xa4\x0f\x57\xab\xf0\x7d\x44\x95\xbe\x10\xaf\x39\x2b\x97\x26\x3c\xcd\x76\x8e\x1e\x1b\xbf\x7e\x94\x59\x4b\x6e\xab\x64\x7a\x43\x37\x02\x57\x8c\xf8\xf8\xeb\xc2\x07\x13\x65\x28\x83\x68\xcc\x10\xff\x09\x00\x00\xff\xff\xd7\x36\xe9\x25\x83\x
1b\x00\x00") func masterEtcOriginMasterMasterConfigYamlBytes() ([]byte, error) { return bindataRead( @@ -158,7 +158,7 @@ func masterEtcOriginMasterSchedulerJson() (*asset, error) { return a, nil } -var _masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x16\xa5\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x5a\x85\xda\x5a\x25\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x24\x15\xae\x79\xc9\x48\x0a\x00\x01\x00\x00\xff\xff\x58\x97\xb9\x86\x74\x00\x00\x00") +var _masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x96\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x52\x85\x1a\x85\xc2\xd2\xfc\x92\x54\x85\xda\x5a\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x84\x3a\xd7\xbc\x64\x4c\x65\x80\x00\x00\x00\xff\xff\xc1\xc1\xc9\xa5\x80\x00\x00\x00") func masterEtcOriginMasterSessionSecretsYamlBytes() ([]byte, error) { return bindataRead( @@ -198,7 +198,7 @@ func masterTmpAnsibleAnsibleSh() (*asset, error) { return a, nil } -var _masterTmpAnsibleAzureLocalMasterInventoryYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x6d\x6f\xdb\x38\x12\xfe\x9e\x5f\x31\x70\x0b\xf8\x0e\xa8\x6d\xb4\x5d\xe0\x6e\x05\xf4\x83\x37\xab\x36\xc1\x36\x71\x60\x3b\x87\xf6\x16\x0b\x2d\x45\x8d\x2c\x5e\x68\x8e\x96\xa4\xec\x78\x83\xfc\xf7\xc3\x88\x94\xe3\x28\x71\x9b\xdd\x7c\x8a\xcc\x79\x79\x38\xf3\xcc\x0b\x47\xa3\xd1\xc9\x2b\xf8\x78\xfe\xe5\x22\x4d\x60\x6a\x76\xbe\x52\x66\x05\x39\x6a\xda\x82\x32\x20\xb4\x1e\x49\x51\x3b\xa8\x84\x03\xe5\x87\x0e\x36\x42\x37\x08\x16\x6b\x2d\x24\x16\x90\xef\xc0\x61\xc1\xa2\xbe\xc2\x93\x57\x10\xff\xa8\x46\xe3\x2a\x55\xfa\xb5\x70\x1e\xad\x93\x56\xd5\x7e\xec\x2a\xd8\x56\x4a\x56\xa0\x1c\x18\xf2\xa0\x0a\x14\xfa\x0d\x6c\x11\x5c\x45\x8d\x2e\xa0\x54\xb7\xe0\x2b\xe1\xc7\x27\x9a\xa4\xd0\x41\x39\x39\x01\xa8\xc8\x79\xc7\xff\x00\xb4\x27\xfc\x7d\x02\xb0\x11\x36\xfe\x2a\x8c\x53\xb9\xc6\x4c\x92\x31\x28\xbd\x22\x93\x04\xc9\x47\xa7\xf5\xce\x57\x64\x32\x65\x3c\xda\xda\x22\x1b\x87\x49\xe3\xec\x24\x57\x66\x12\x0e\xdf\x9d\x9c\x3c\xba\x40\xa6\x1c\x1b\xf5\x42\x19\xb4\xea\x4f\x2c\x12\xf8\x28\xb4\xc3\x9e\x94\xd4\x0a\x8d\xcf\x72\x65\x84\xdd\x1d\x18\x25\xf9\xc4\xde\x5a\xac\x30\xf3\x62\x95\xc0\x60\xf3\x9f\x74\xbe\x38\x9f\x5d\x0e\xfa\x42\x5b\xcc\xd9\xab\x23\x8d\x51\xc1\x88\x35\x26\x30\x38\xbf\x98\x7e\x4a\xb3\xab\x79\xfa\xf1\xfc\xcb\x24\x7c\x2c\xbf\x5e\xa5\xa3\x2d\xe6\xa3\xa8\x90\xf4\xad\x5a\x5c\x65\x8d\xd5\x09\x0c\x2f\xa6\x8b\x65\x3a\xcf\x66\xf3\xf4\x53\x76\x3d\xff\x3c\x7a\x7d\x27\x69\x5d\x93\x41\xe3\xef\x93\xd7\x77\x1b\xb4\x4e\x91\xb9\x1f\xf6\xd0\x84\x3c\x64\x05\x96\xa2\xd1\x3e\x73\x4d\x5e\xd0\x5a\x28\x93\xc0\x70\x99\x5e\x5c\xcd\x67\xd7\xcb\x74\x7e\x7e\x35\x36\xaa\x1e\x2b\x1a\x06\xb7\x91\x55\xf1\xff\x65\xa5\x5c\x97\xe5\x1c\xc1\xef\x6a\xfc\xa0\x4c\x69\xc5\x1b\xc8\x1b\xcf\x14\xa8\xc4\x06\xc1\x13\x68\xb5\x41\xd8\x2a\x5f\x81\xc5\x95\x22\x13\xc4\xa0\x24\x0b\x86\xb6\xd1\x5c\x8e\x52\x34\x0e\x81\x4a\xd0\xb8\x12\x72\x07\x16\x85\x23\xe3\x7a\xc8\x2d\x35\x8c\xdc\xa1\x46\xe9\xc9\x26\x30\x3c\x34\xda\xbf\x27\x9f\x39\x6f\x77\xdf\x96\x7f\x05\x97\xd4\x52\x17\xb6\xd5\x0e\x3c\x5f\x4c\x39\x10\x50\xa8\xb2\x44\x8b\xc6\x43\x21\xbc\x68\xaf\x18\x2e\xa7\x3c\xa8\x3e\xb0\xda\xd2\x1a\x7d\x85\x8d\x
cb\x0c\x15\x78\xe0\xf1\x2e\xba\x1c\x26\x30\x0c\x5e\xef\xbb\x80\x2e\xd0\xc3\xf4\xcf\xc6\x22\xb8\x1a\xa5\x2a\x95\x0c\xae\x38\x36\x5c\x11\x58\x40\x77\x85\x9e\xbb\x70\x7a\x70\x41\x4f\x96\x59\x55\x5b\xda\xa8\x82\xcb\x60\x20\xd8\x70\x96\x6b\xca\x07\x2f\x54\xbe\x51\xa6\x48\x60\x40\xf9\xff\x50\xfa\x97\x2a\x3d\xb8\xc9\x84\x94\xd4\x18\x1f\x98\x3d\x9c\xa7\x9f\xce\x17\xcb\xf9\xd7\x6c\xb1\x9c\xcd\x99\xd7\xd3\xff\x5e\xcf\xd3\x6c\x7a\x7a\x3a\xbb\xbe\x5c\x5e\x4e\x2f\xd2\x7e\xba\x5e\xee\xe2\x06\x77\xdf\xf5\xf0\x4b\xfa\xf5\x6f\x38\xd8\xb7\x86\x04\x06\x9d\xdc\xdf\x08\x85\x45\xa1\xd7\x09\x0c\x24\x59\x1c\x6f\x95\x29\x68\xeb\xc6\x06\xfd\xe0\x99\x5a\x8a\xbf\x9c\x09\x5b\x80\xa4\x02\x03\x05\x63\x7d\x8c\x1f\xc9\x9c\x36\x96\x09\xa9\x99\xa6\x08\x52\x37\x5c\xc7\xe0\xbc\xf0\x08\xc2\x43\x81\xb5\xa6\xdd\x9a\x29\xeb\xd5\x1a\xa1\x20\x0c\x8d\x39\xd4\x62\x85\xc0\xdc\x74\xd1\x58\xc0\x8f\x16\x0b\x2e\x53\x3e\x0d\x7d\x21\x14\xab\x16\x39\x6a\x07\xa2\xae\xb5\xc2\x02\x84\x61\x2e\x8a\x62\xc7\xb2\x39\xc2\x1f\x0d\x5a\x85\x45\x34\x25\x56\x42\x19\xe7\x19\x03\xdb\xa9\x49\x19\xdf\xce\x10\x46\x11\x66\x49\x04\xd7\x8e\x83\x20\xa4\xc5\x2e\x27\xba\x71\x60\x1b\x33\x86\xa9\x76\xf4\x26\x9a\xe3\xe3\x50\xe9\xed\x60\x52\x52\x40\x9b\x76\x88\x1d\x0b\x06\xa5\xd0\xda\x41\x2e\xe4\xcd\x80\x01\xbd\x65\x9c\x96\x6a\xab\x84\x47\xbd\x83\x6d\x85\x16\x41\xb8\x43\x7b\x31\x5b\x7b\x8b\x9a\x56\x5c\x6f\x31\x44\x63\x58\xb6\x3a\x5b\xe1\x40\x68\x47\x50\x28\x27\x1b\xc7\xed\x13\x44\x4e\x5c\xf5\x65\xb4\xd6\x8e\xcd\x03\x7f\x0c\xa0\xa0\x90\xb5\xd8\xe7\x5a\x0f\x0e\x3e\x7c\x80\xd0\xec\xda\xb0\xef\x9b\x1c\x1b\x88\xb6\x6a\xb4\x25\x4a\x4e\x68\x89\xa2\x9d\x69\x6c\xad\x4d\x97\x30\x07\xca\xf1\xfe\x95\x5a\x55\x68\x39\x86\x5d\x4c\x9d\xb2\xfb\x2c\x74\x7e\x1f\x73\x66\x8e\x1b\xe5\x94\x07\x2d\x38\x9e\xff\xa8\xc9\xb1\x9f\x1d\x37\x38\x81\x6b\x32\x0e\x3d\x90\x85\xd7\xe4\x2b\xb4\xff\x3c\x42\xf5\xd0\x76\x3b\x07\x09\xbc\xfd\x4e\x49\x1c\x4a\x1e\xef\x94\x2d\x71\x13\x18\xd4\x16\x1d\x9a\x27\x2d\xa7\xdf\x53\xf1\xb6\x26\xcb\x38\xc2\x04\x8d\xd3\x2d\x81\xe1\xd5\x7c\x76\x91\x2e\xcf\xd2\xeb\x45\x96\x7e\xb9\x9a\xcd\x79\x26\xc6\x99\x39\xfc\xe6\x24\x36\xce\x0b\xad\x13\x58\xda\x06\xbf\x81\x33\xf8\xab\x2d\x96\xea\xb6\x3f\xb3\xfb\xa0\x1f\x6a\x30\xe3\x89\x91\xc0\x70\x7a\xb9\x38\xff\xe9\x73\x9a\xfd\x9c\x5e\x7d\x9e\x7d\x6d\xc7\x7b\x44\xd5\xad\x31\x0e\xed\x46\x49\xcc\x72\x4b\x37\x7c\xbd\x47\xb0\xbe\x25\xc8\xb8\x12\x80\xbb\xbb\xdf\x07\x77\x77\xa0\x33\xe1\xf2\xfd\x54\x6f\x0f\x5d\x56\x28\xe9\x7f\x3d\x0a\xef\x37\x50\xe5\x71\xf0\x4c\xe0\x21\x59\xb5\x52\x66\x08\xa8\x1d\x46\x17\x21\x1e\x8d\xd5\x70\x7f\x3f\xf8\xfd\xfe\xbe\x17\x82\x0e\xa5\x14\x5e\x68\x5a\x75\x30\x8f\xee\x3a\x51\x7e\x14\xe5\x0f\xf6\x9d\x67\xd7\x16\xf4\xb2\x60\xe7\x2e\x81\x5f\x07\x95\xf7\xb5\x4b\x26\x93\xb3\xd9\xa2\x9d\x29\xc9\xbb\xf7\xff\xfa\x71\xf0\x5b\x88\xaf\xc7\x75\xcd\xb4\x3f\x12\xb7\xa3\x80\x3a\xbd\x3d\xb2\xa0\xd7\x5f\xc4\x5e\xc1\xe5\x6c\x99\x26\x61\x17\x52\x0e\x2c\x16\x8d\x29\x84\xf1\x61\x4d\xb0\xf8\x47\xa3\x62\x77\xad\x84\x29\x34\x42\xa4\x2c\xb8\x1b\xdc\x42\x8e\x7e\x8b\x68\xa2\xa9\xde\x9e\x3d\x8a\x29\xe7\xee\x39\x6b\x33\xd0\xb6\xe0\xd9\xe9\x55\x2f\x28\x78\x2b\xd6\xb5\xc6\xb0\xd4\x72\xe2\xf6\x65\x31\xd8\x2c\xce\x66\xf3\x25\x57\xc2\x93\x7d\x54\x92\xbc\xa9\x55\x97\x70\xb4\x0f\xe4\x3e\x9d\x9d\xfe\x72\x75\xbe\x3c\x46\xef\x27\x8a\xb9\x70\x18\x77\xd9\x4e\xf5\xa7\xe9\x22\xe5\x5c\x7c\x57\xf7\x01\x6a\xa7\xfa\x7c\x80\x7f\xa6\x76\x8a\x15\x58\x2a\x83\xdf\xed\x4e\xe0\xe8\x61\xca\x78\xe1\x6e\xa0\x54\x1a\xbb\x1e\xd9\xca\x8e\x77\x6b\x0d\xca\x38\x55\x84\xb1\xd8\x37\x09\x96\x34\x42\x69\x69\xfd\x4c\x46\xb6\x4a\xeb\x6e\xee\x35\x9e\x6a\xaa\x1b\x26\x0b\xaf\x85\x8d\xe3\xd7\
xd6\x73\x16\xfb\x9b\x6b\x9b\x4d\x1e\xa2\x3b\x65\x56\x07\x63\xca\x34\xeb\x1c\x2d\xef\xc1\x07\x93\xa3\x4f\x37\xe4\x87\x92\x12\xb9\x6e\x87\x0a\x3f\xed\x84\x65\xa6\x78\xb4\x46\x68\xe6\xdb\x53\xd4\xec\x6f\x8b\x43\x8b\x8f\xf8\x16\xf6\xc5\x08\x7a\x0d\xed\x08\xf4\x04\x62\x43\xaa\x68\xf1\x28\x23\x79\xe7\xe0\xcd\xc4\x79\x86\x55\x0a\xe9\xa1\x54\xa6\xe8\x70\xef\x5d\x85\x87\x1b\x80\xa4\xf5\x9a\x4c\xf7\xc5\xdf\xa6\x54\xab\x96\x28\x09\x4c\xd0\xcb\x49\x68\x29\x93\xbd\xc4\x0b\x39\x1c\x84\x1f\x1e\x91\xf1\x0a\x4d\xae\x95\xdc\xf7\xf4\xf6\x61\xb4\x6f\x0b\x77\x77\x30\x4e\x6f\x43\x5c\x2e\x5a\xc5\x33\x72\xed\x86\x0a\xf7\xf7\xc9\xbf\x7f\xf8\xe1\xfd\x24\x2a\x0e\xfa\x06\x45\xad\xfe\xb2\xb1\x07\x23\x2f\x69\x52\xf0\x2a\xbe\xd3\x2f\x09\x34\x99\x15\x5a\x30\x88\x05\x6f\x5b\x8e\x43\xfd\x24\x89\xa3\xf7\xe3\x1f\xc7\xef\xde\x8d\xde\xc6\x37\xd6\xd0\x62\x5b\x18\x64\x02\xe1\xbb\xf6\xb2\x43\x1f\x81\x30\x7d\x1e\x42\xc5\x5f\x71\x3b\xef\xa0\x0c\x4f\x4e\x88\x62\x67\x75\x99\xa7\x2c\x64\xeb\xe8\x1b\xfd\xff\x01\x00\x00\xff\xff\xc3\xf7\x65\xa1\x60\x10\x00\x00") +var _masterTmpAnsibleAzureLocalMasterInventoryYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x6d\x6f\xdb\x38\x12\xfe\x9e\x5f\x31\x50\x0a\xf8\x0e\xa8\x6d\xb4\x5d\xe0\x6e\x05\xf4\x83\x37\xab\x36\xc1\x36\x71\x60\x3b\x87\xf6\x16\x0b\x2d\x25\x8d\x2c\x5e\x28\x8e\x4a\x52\x76\xbc\xb9\xfc\xf7\xc5\x88\x94\xe3\x28\x49\x1b\x34\x9f\x22\x73\x5e\x1e\xce\x3c\xf3\xc2\xf1\x78\x7c\x74\x0c\x1f\xce\x3e\x9f\x27\x31\xcc\xf4\xce\x55\x52\xaf\x21\x43\x45\x5b\x90\x1a\x84\x52\xe3\x5c\x34\x16\x2a\x61\x41\xba\x91\x85\x8d\x50\x2d\x82\xc1\x46\x89\x1c\x0b\xc8\x76\x60\xb1\x60\x51\x57\xe1\xd1\x31\x84\x3f\x6a\x50\xdb\x4a\x96\xae\x16\xd6\xa1\xb1\xb9\x91\x8d\x9b\xd8\x0a\xb6\x95\xcc\x2b\x90\x16\x34\x39\x90\x05\x0a\xf5\x1a\xb6\x08\xb6\xa2\x56\x15\x50\xca\x1b\x70\x95\x70\x93\x23\x45\xb9\x50\x5e\x39\x3e\x02\xa8\xc8\x3a\xcb\xff\x00\x74\x27\xfc\x7d\x04\xb0\x11\x26\xfc\x2a\xb4\x95\x99\xc2\x34\x27\xad\x31\x77\x92\x74\xec\x25\x1f\x9c\x36\x3b\x57\x91\x4e\xa5\x76\x68\x1a\x83\x6c\x1c\xa6\xad\x35\xd3\x4c\xea\xa9\x3f\x7c\x7b\x74\xf4\xe0\x02\xa9\xb4\x6c\xd4\x09\xa9\xd1\xc8\xbf\xb0\x88\xe1\x83\x50\x16\x07\x52\xb9\x92\xa8\x5d\x9a\x49\x2d\xcc\xee\xc0\x28\xe5\x8f\xec\xd5\x62\x8d\xa9\x13\xeb\x18\xa2\xcd\x7f\x92\xc5\xf2\x6c\x7e\x11\x0d\x85\xb6\x98\xb1\x57\x4b\x0a\x83\x82\x16\x35\xc6\x10\x9d\x9d\xcf\x3e\x26\xe9\xe5\x22\xf9\x70\xf6\x79\xea\x3f\x56\x5f\x2e\x93\xf1\x16\xb3\x71\x50\x88\x87\x56\x0d\xae\xd3\xd6\xa8\x18\x46\xe7\xb3\xe5\x2a\x59\xa4\xf3\x45\xf2\x31\xbd\x5a\x7c\x1a\xbf\xba\xcd\xa9\x6e\x48\xa3\x76\x77\xf1\xab\xdb\x0d\x1a\x2b\x49\xdf\x8d\x06\x68\x7c\x1e\xd2\x02\x4b\xd1\x2a\x97\xda\x36\x2b\xa8\x16\x52\xc7\x30\x5a\x25\xe7\x97\x8b\xf9\xd5\x2a\x59\x9c\x5d\x4e\xb4\x6c\x26\x92\x46\xde\x6d\x60\x55\xf8\x7f\x55\x49\xdb\x67\x39\x43\x70\xbb\x06\xdf\x4b\x5d\x1a\xf1\x1a\xb2\xd6\x31\x05\x2a\xb1\x41\x70\x04\x4a\x6e\x10\xb6\xd2\x55\x60\x70\x2d\x49\x7b\x31\x28\xc9\x80\xa6\x6d\x30\x97\x61\x2e\x5a\x8b\x40\x25\x28\x5c\x8b\x7c\x07\x06\x85\x25\x6d\x07\xc8\x0d\xb5\x8c\xdc\xa2\xc2\xdc\x91\x89\x61\x74\x68\x74\x78\x4f\x3e\xb3\xce\xec\xbe\x2d\x7f\x0c\x17\xd4\x51\x17\xb6\xd5\x0e\x1c\x5f\x4c\x5a\x10\x50\xc8\xb2\x44\x83\xda\x41\x21\x9c\xe8\xae\xe8\x2f\x27\x1d\xc8\x21\xb0\xc6\x50\x8d\xae\xc2\xd6\xa6\x9a\x0a\x3c\xf0\x78\x1b\x5c\x8e\x62\x18\x79\xaf\x77\x7d\x40\x97\xe8\x60\xf6\x57\x6b\x10\x6c\x83\xb9\x2c\x65\xee\x5d\x71\x6c\xb8\x22\xb0\x80\xfe\x0a\x03\x77\xfe\xf4\xe0\x82\x8e\x0c\xb3\xaa\x31\xb4\x91\x05\x97\x41\x24\xd8\x70\x9a\x29\xca\xa2\x17\x2a\x5f\x4b\x5d\xc4\x10\x51\xf6\x3f\xcc\xdd\x4b\x95\xee\xdd\xa4\x22\xcf\xa9\xd5\xce\x33\x7b\xb4\x48\x3e\x9e\x2d
\x57\x8b\x2f\xe9\x72\x35\x5f\x30\xaf\x67\xff\xbd\x5a\x24\xe9\xec\xe4\x64\x7e\x75\xb1\xba\x98\x9d\x27\xc3\x74\xbd\xdc\xc5\x35\xee\xbe\xeb\xe1\xb7\xe4\xcb\x0f\x38\xd8\xb7\x86\x18\xa2\x5e\xee\x07\x42\x61\x50\xa8\x3a\x86\x28\x27\x83\x93\xad\xd4\x05\x6d\xed\x44\xa3\x8b\x9e\xa8\xa5\xf0\xcb\xa9\x30\x05\xe4\x54\xa0\xa7\x60\xa8\x8f\xc9\x03\x99\x93\xd6\x30\x21\x15\xd3\x14\x21\x57\x2d\xd7\x31\x58\x27\x1c\x82\x70\x50\x60\xa3\x68\x57\x33\x65\x9d\xac\x11\x0a\x42\xdf\x98\x7d\x2d\x56\x08\xcc\x4d\x1b\x8c\x79\xfc\x68\xb0\xe0\x32\xe5\x53\xdf\x17\x7c\xb1\x2a\x91\xa1\xb2\x20\x9a\x46\x49\x2c\x40\x68\xe6\xa2\x28\x76\x2c\x9b\x21\x7c\x6d\xd1\x48\x2c\x82\x29\xb1\x16\x52\x5b\xc7\x18\xd8\x4e\x43\x52\xbb\x6e\x86\x30\x0a\x3f\x4b\x02\xb8\x6e\x1c\x78\x21\x25\x76\x19\xd1\xb5\x05\xd3\xea\x09\xcc\x94\xa5\xd7\xc1\x1c\x1f\xfb\x4a\xef\x06\x93\xcc\x05\x74\x69\x87\xd0\xb1\x20\x2a\x85\x52\x16\x32\x91\x5f\x47\x0c\xe8\x0d\xe3\x34\xd4\x18\x29\x1c\xaa\x1d\x6c\x2b\x34\x08\xc2\x1e\xda\x0b\xd9\xda\x5b\x54\xb4\xe6\x7a\x0b\x21\x9a\xc0\xaa\xd3\xd9\x0a\x0b\x42\x59\x82\x42\xda\xbc\xb5\xdc\x3e\x41\x64\xc4\x55\x5f\x06\x6b\xdd\xd8\x3c\xf0\xc7\x00\x0a\xf2\x59\x0b\x7d\xae\xf3\x60\xe1\xfd\x7b\xf0\xcd\xae\x0b\xfb\xbe\xc9\xb1\x81\x60\xab\x41\x53\x62\xce\x09\x2d\x51\x74\x33\x8d\xad\x75\xe9\x12\xfa\x40\x39\xdc\xbf\x92\xeb\x0a\x0d\xc7\xb0\x8f\xa9\x95\x66\x9f\x85\xde\xef\x43\xce\x2c\x70\x23\xad\x74\xa0\x04\xc7\xf3\x1f\x0d\x59\xf6\xb3\xe3\x06\x27\xb0\x26\x6d\xd1\x01\x19\x78\x45\xae\x42\xf3\xcf\x67\xa8\xee\xdb\x6e\xef\x20\x86\x37\xdf\x29\x89\x43\xc9\xe7\x3b\x65\x47\xdc\x18\xa2\xc6\xa0\x45\xfd\xa8\xe5\x0c\x7b\x2a\xde\x34\x64\x18\x87\x9f\xa0\x61\xba\xc5\x30\xba\x5c\xcc\xcf\x93\xd5\x69\x72\xb5\x4c\x93\xcf\x97\xf3\x05\xcf\xc4\x30\x33\x47\xdf\x9c\xc4\xda\x3a\xa1\x54\x0c\x2b\xd3\xe2\x37\x70\x7a\x7f\x8d\xc1\x52\xde\x0c\x67\xf6\x10\xf4\x7d\x0d\xa6\x3c\x31\x62\x18\xcd\x2e\x96\x67\xbf\x7c\x4a\xd2\x5f\x93\xcb\x4f\xf3\x2f\xdd\x78\x0f\xa8\xfa\x35\xc6\xa2\xd9\xc8\x1c\xd3\xcc\xd0\x35\x5f\xef\x01\xac\x6f\x09\x32\xae\x18\xe0\xf6\xf6\xcf\xe8\xf6\x16\x54\x2a\x6c\xb6\x9f\xea\xdd\xa1\x4d\x0b\x99\xbb\xdf\x9f\x85\xf7\x07\xc8\xf2\x79\xf0\x4c\xe0\x11\x19\xb9\x96\x7a\x04\xa8\x2c\x06\x17\x3e\x1e\xad\x51\x70\x77\x17\xfd\x79\x77\x37\x08\x41\x8f\x32\x17\x4e\x28\x5a\xf7\x30\x9f\xdd\x75\x82\xfc\x38\xc8\x1f\xec\x3b\x4f\xae\x2d\xe8\xf2\x82\x9d\xdb\x18\x7e\x8f\x2a\xe7\x1a\x1b\x4f\xa7\xa7\xf3\x65\x37\x53\xe2\xb7\xef\xfe\xf5\x73\xf4\x87\x8f\xaf\xc3\xba\x61\xda\x3f\x13\xb7\x67\x01\xf5\x7a\x7b\x64\x5e\x6f\xb8\x88\x1d\xc3\xc5\x7c\x95\xc4\x7e\x17\x92\x16\x0c\x16\xad\x2e\x84\x76\x7e\x4d\x30\xf8\xb5\x95\xa1\xbb\x56\x42\x17\x0a\x21\x50\x16\xec\x35\x6e\x21\x43\xb7\x45\xd4\xc1\xd4\x60\xcf\x1e\x87\x94\x73\xf7\x9c\x77\x19\xe8\x5a\xf0\xfc\xe4\x72\x10\x14\xbc\x11\x75\xa3\xd0\x2f\xb5\x9c\xb8\x7d\x59\x44\x9b\xe5\xe9\x7c\xb1\xe2\x4a\x78\xb4\x8f\xe6\x94\x5f\x37\xb2\x4f\x38\x9a\x7b\x72\x9f\xcc\x4f\x7e\xbb\x3c\x5b\x3d\x47\xef\x47\x8a\x99\xb0\x18\x76\xd9\x5e\xf5\x97\xd9\x32\xe1\x5c\x7c\x57\xf7\x1e\x6a\xaf\xfa\x74\x80\x7f\xa5\x6e\x8a\x15\x58\x4a\x8d\xdf\xed\x4e\x60\xe9\x7e\xca\x38\x61\xaf\xa1\x94\x0a\xfb\x1e\xd9\xc9\x4e\x76\xb5\x02\xa9\xad\x2c\xfc\x58\x1c\x9a\x04\x43\x0a\xa1\x34\x54\x3f\x91\x91\xad\x54\xaa\x9f\x7b\xad\xa3\x86\x9a\x96\xc9\xc2\x6b\x61\x6b\xf9\xb5\xf5\x94\xc5\xe1\xe6\xda\x65\x93\x87\xe8\x4e\xea\xf5\xc1\x98\xd2\x6d\x9d\xa1\xe1\x3d\xf8\x60\x72\x0c\xe9\x86\xfc\x50\x92\x22\x53\xdd\x50\xe1\xa7\x9d\x30\xcc\x14\x87\x46\x0b\xc5\x7c\x7b\x8c\x9a\xfd\x6d\x71\x64\xf0\x01\xdf\xfc\xbe\x18\x40\xd7\xd0\x8d\x40\x47\x20\x36\x24\x8b\x0e\x8f\xd4\x39\xef\x1c\xbc\x99\x58\xc7\xb0\x4a\x91\x3b\x28\xa5\x2e\x7a\xdc\x7b\x57\xf
e\xe1\x06\x90\x53\x5d\x93\xee\xbf\xf8\x5b\x97\x72\xdd\x11\x25\x86\x29\xba\x7c\xea\x5b\xca\x74\x2f\xf1\x42\x0e\x7b\xe1\xfb\x47\x64\xb8\x42\x9b\x29\x99\xef\x7b\x7a\xf7\x30\xba\xbd\x85\xc6\xf0\x0e\xb2\xef\x0f\x11\x4c\x92\x1b\x1f\x9f\xf3\xce\xc0\x29\xd9\x6e\x53\x85\x28\xfe\xf7\x4f\x3f\xbd\x9b\x06\xfd\x08\xfe\x0f\x5f\x5b\x72\x08\xa1\xad\x1d\xb8\x10\x8d\xfc\x51\xf3\x4f\x9a\x7d\x49\x23\x83\xe3\xf0\x96\xbf\x20\x50\xa4\xd7\x68\x40\x23\x16\xbc\x91\x59\x4e\xc7\xa3\x44\x8f\xdf\x4d\x7e\x9e\xbc\x7d\x3b\x7e\x13\xde\x61\x23\x83\x5d\xf1\x90\xf6\x45\xd1\xb7\xa0\x1d\xba\x00\x84\x29\x76\x1f\x4e\xfe\x0a\x1b\x7c\x0f\x65\x74\x74\x44\x14\xba\xaf\x4d\x1d\xa5\x3e\xa3\xcf\xbe\xe3\xff\x0e\x00\x00\xff\xff\xea\xca\x5c\x16\x84\x10\x00\x00") func masterTmpAnsibleAzureLocalMasterInventoryYmlBytes() ([]byte, error) { return bindataRead( @@ -298,7 +298,7 @@ func masterTmpBootstrapconfigsMasterConfigYaml() (*asset, error) { return a, nil } -var _nodeEtcOriginCloudproviderAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xd0\x51\x0a\x82\x40\x10\x06\xe0\xf7\x4e\xe1\x09\x3c\x40\x6f\x8b\x42\x04\x11\x91\x5e\x60\x5c\xa7\x18\x58\x77\x65\x9c\x0d\x4c\xbc\x7b\xac\xfa\x60\xb9\x45\xcf\xff\xf7\xff\xec\x8e\xa0\x05\x2b\xc7\x7a\x9f\x0c\x43\x92\xaa\xa7\x67\xcc\x9c\xbd\xd1\x3d\x2d\xe7\x24\x4f\xc6\x71\xd7\xf9\xaa\xd3\x4c\xad\x90\xb3\x31\x5b\xac\xf3\xa9\x01\x50\x67\x86\x30\x3e\xad\x54\xbe\x84\xef\xb6\x40\xcd\x28\x3f\xfc\x0c\x96\x4e\xf9\xd7\xd3\x19\x3b\xe7\x59\xe3\x81\x9d\x6f\xb7\xf4\xba\x8e\x83\x37\x4e\x43\xf8\xc6\x96\x9e\x96\x64\x3a\x08\x6a\xcf\x24\xfd\x54\x3b\x43\x83\x91\x9b\x7c\x92\xd0\x6b\x99\x1a\xe0\x5e\x3d\x80\x0c\x54\x64\x48\xfa\x02\x25\x3e\x70\xf9\x6a\xc3\xd2\x2b\x00\x00\xff\xff\x0d\x19\xb3\x4a\xb9\x01\x00\x00") +var _nodeEtcOriginCloudproviderAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x90\x51\x8a\x83\x30\x10\x86\xdf\xf7\x14\x39\x81\x07\xd8\xb7\xa0\xb0\x2c\x2c\xcb\xb2\x7a\x81\x31\x4e\xcb\x40\x4c\xec\x38\x29\x58\xeb\xdd\x4b\xab\x0f\x51\xd3\x42\x9f\xbf\xff\xfb\x87\xf9\x05\x1d\x38\xf9\x6e\x3e\xd5\x38\xaa\x4c\x5f\x02\x63\xee\xdd\x81\x8e\x59\x35\x93\x42\x5d\xd5\x29\x78\x41\x35\x4d\x1f\x7d\xa8\x7b\xc3\xd4\x09\x79\x97\x72\xca\x98\xaf\x4c\x80\x26\xb7\x84\xe9\x53\x5a\x17\x0b\x4c\x3b\x25\x1a\x46\x79\xe1\xcd\x81\x8d\x5b\xbd\xf5\x1a\x63\xef\x03\x1b\xfc\x62\x1f\xba\xbd\xf2\x1f\xe3\xd8\xb3\xde\xc0\xfd\xdd\xbd\xf2\xb3\x90\xd5\x80\x68\x02\x93\x0c\x8f\x9a\x5f\x68\x31\xb1\xe1\x36\x12\xfb\x1d\x53\x0b\x3c\xe8\x33\x90\x85\x9a\x2c\xc9\x50\xa2\xa4\x8b\xfe\x9e\x66\xe3\xc6\x5b\x00\x00\x00\xff\xff\x34\x7d\xff\xaa\x01\x02\x00\x00") func nodeEtcOriginCloudproviderAzureConfBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf b/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf index 4762d60b51..c148e25cc3 100644 --- a/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf +++ b/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf @@ -1,26 +1,26 @@ -ETCD_NAME={{ .Master.Hostname }} -ETCD_LISTEN_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_NAME={{ .Master.Hostname | shellQuote }} +ETCD_LISTEN_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_DATA_DIR=/var/lib/etcd/ #ETCD_WAL_DIR="" #ETCD_SNAPSHOT_COUNT=10000 ETCD_HEARTBEAT_INTERVAL=500 ETCD_ELECTION_TIMEOUT=2500 -ETCD_LISTEN_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 +ETCD_LISTEN_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_MAX_SNAPSHOTS=5 
#ETCD_MAX_WALS=5 #ETCD_CORS= #[cluster] -ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 -ETCD_INITIAL_CLUSTER={{ .Master.Hostname }}=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} +ETCD_INITIAL_CLUSTER={{ print .Master.Hostname "=https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_INITIAL_CLUSTER_STATE=new ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster-1 #ETCD_DISCOVERY= #ETCD_DISCOVERY_SRV= #ETCD_DISCOVERY_FALLBACK=proxy #ETCD_DISCOVERY_PROXY= -ETCD_ADVERTISE_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 +ETCD_ADVERTISE_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_STRICT_RECONFIG_CHECK="false" #ETCD_AUTO_COMPACTION_RETENTION="0" #ETCD_ENABLE_V2="true" diff --git a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml index c0d559967b..fdac6afb98 100644 --- a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml +++ b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml @@ -60,13 +60,13 @@ controllers: '*' corsAllowedOrigins: - (?i)//127\.0\.0\.1(:|\z) - (?i)//localhost(:|\z) -- (?i)//{{ QuoteMeta (index .Master.IPs 0).String }}(:|\z) +- {{ print "(?i)//" (QuoteMeta (index .Master.IPs 0).String) "(:|\\z)" | quote }} - (?i)//kubernetes\.default(:|\z) - (?i)//kubernetes\.default\.svc\.cluster\.local(:|\z) - (?i)//kubernetes(:|\z) -- (?i)//{{ QuoteMeta .ExternalMasterHostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .ExternalMasterHostname) "(:|\\z)" | quote }} - (?i)//openshift\.default(:|\z) -- (?i)//{{ QuoteMeta .Master.Hostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .Master.Hostname) "(:|\\z)" | quote }} - (?i)//openshift\.default\.svc(:|\z) - (?i)//kubernetes\.default\.svc(:|\z) - (?i)//172\.30\.0\.1(:|\z) @@ -80,7 +80,7 @@ etcdClientInfo: certFile: master.etcd-client.crt keyFile: master.etcd-client.key urls: - - https://{{ .Master.Hostname }}:2379 + - {{ print "https://" .Master.Hostname ":2379" | quote }} etcdStorageConfig: kubernetesStoragePrefix: kubernetes.io kubernetesStorageVersion: v1 @@ -119,7 +119,7 @@ kubernetesMasterConfig: cloud-config: - "/etc/origin/cloudprovider/azure.conf" masterCount: 1 - masterIP: {{ (index .Master.IPs 0).String }} + masterIP: {{ (index .Master.IPs 0).String | quote }} podEvictionTimeout: null proxyClientInfo: certFile: master.proxy-client.crt @@ -142,7 +142,7 @@ masterClients: contentType: application/vnd.kubernetes.protobuf qps: 300 openshiftLoopbackKubeConfig: openshift-master.kubeconfig -masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} +masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} networkConfig: clusterNetworkCIDR: 10.128.0.0/14 clusterNetworks: @@ -154,7 +154,7 @@ networkConfig: networkPluginName: redhat/openshift-ovs-subnet serviceNetworkCIDR: 172.30.0.0/16 oauthConfig: - assetPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}/console/ + assetPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port "/console/" | quote }} grantConfig: method: auto identityProviders: @@ -166,8 +166,8 @@ oauthConfig: provider: apiVersion: v1 kind: OpenIDIdentityProvider - clientID: {{ .AzureConfig.AADClientID }} - clientSecret: {{ .AzureConfig.AADClientSecret }} + clientID: {{ 
.AzureConfig.AADClientID | quote }} + clientSecret: {{ .AzureConfig.AADClientSecret | quote }} claims: id: - sub @@ -178,8 +178,8 @@ oauthConfig: email: - email urls: - authorize: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/authorize - token: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/token + authorize: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/authorize" | quote }} + token: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/token" | quote }} {{- end}} - name: Local password challenge: true @@ -190,8 +190,8 @@ oauthConfig: file: /etc/origin/master/htpasswd kind: HTPasswdPasswordIdentityProvider masterCA: ca-bundle.crt - masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} - masterURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} + masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} + masterURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} sessionConfig: sessionMaxAgeSeconds: 3600 sessionName: ssn @@ -225,7 +225,7 @@ serviceAccountConfig: publicKeyFiles: - serviceaccounts.public.key servingInfo: - bindAddress: 0.0.0.0:{{ .Master.Port }} + bindAddress: {{ print "0.0.0.0:" .Master.Port | quote }} bindNetwork: tcp4 certFile: master.server.crt clientCA: ca.crt diff --git a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml index e958ea1d0f..d440e83292 100644 --- a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml +++ b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml @@ -1,5 +1,5 @@ apiVersion: v1 kind: SessionSecrets secrets: -- authentication: "{{ .AuthSecret }}" - encryption: "{{ .EncSecret }}" +- authentication: {{ .AuthSecret | quote }} + encryption: {{ .EncSecret | quote }} diff --git a/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml b/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml index 365f530b9d..dde741302e 100644 --- a/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml +++ b/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml @@ -86,8 +86,8 @@ localmaster: config_base: /etc/origin/ examples_content_version: "vSHORT_VER" master: - public_console_url: "https://{{ .ExternalMasterHostname }}:8443/console" - public_api_url: "https://{{ .ExternalMasterHostname }}:8443" + public_console_url: {{ print "https://" .ExternalMasterHostname ":8443/console" | quote }} + public_api_url: {{ print "https://" .ExternalMasterHostname ":8443" | quote }} etcd_urls: ["https://HOSTNAME:2379"] #FIXME: No longer needed as of openshift-ansible-3.9.22-1 but we're not on that version yet node: nodename: 'HOSTNAME' diff --git a/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf b/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf index fa3d075114..870b2aad1d 100644 --- a/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf +++ b/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf @@ -1,9 +1,9 @@ -tenantId: {{ .AzureConfig.TenantID }} -subscriptionId: {{ .AzureConfig.SubscriptionID }} -aadClientId: {{ 
.AzureConfig.AADClientID }} -aadClientSecret: {{ .AzureConfig.AADClientSecret }} -aadTenantId: {{ .AzureConfig.TenantID }} -resourceGroup: {{ .AzureConfig.ResourceGroup }} -location: {{ .AzureConfig.Location }} -securityGroupName: {{ .AzureConfig.SecurityGroupName }} -primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName }} +tenantId: {{ .AzureConfig.TenantID | quote }} +subscriptionId: {{ .AzureConfig.SubscriptionID | quote }} +aadClientId: {{ .AzureConfig.AADClientID | quote }} +aadClientSecret: {{ .AzureConfig.AADClientSecret | quote }} +aadTenantId: {{ .AzureConfig.TenantID | quote }} +resourceGroup: {{ .AzureConfig.ResourceGroup | quote }} +location: {{ .AzureConfig.Location | quote }} +securityGroupName: {{ .AzureConfig.SecurityGroupName | quote }} +primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName | quote }} From 4d70c902cff555e7f184cadffdae32bf3d6182bd Mon Sep 17 00:00:00 2001 From: Patrick Lang Date: Fri, 6 Jul 2018 16:02:48 -0700 Subject: [PATCH 73/74] Adding e2e sample for kubenet+wincni and adding DNS back to e2e tests (#3429) * Reenable external connectivity test * Adding WinCNI sample --- examples/windows/README.md | 15 +++++++-- examples/windows/kubernetes-wincni.json | 43 +++++++++++++++++++++++++ test/e2e/kubernetes/kubernetes_test.go | 11 +++---- 3 files changed, 61 insertions(+), 8 deletions(-) create mode 100644 examples/windows/kubernetes-wincni.json diff --git a/examples/windows/README.md b/examples/windows/README.md index ad9f3d0c11..d171cd003e 100644 --- a/examples/windows/README.md +++ b/examples/windows/README.md @@ -1,4 +1,4 @@ -# Microsoft Azure Container Service Engine - Builds Docker Enabled Clusters +# Microsoft Azure Container Service Engine ## Overview @@ -8,4 +8,15 @@ These cluster definition examples demonstrate how to create customized Docker En * [Kubernetes Windows Walkthrough](../../docs/kubernetes/windows.md) - shows how to create a hybrid Kubernetes Windows enabled Docker cluster on Azure. * [Building Kubernetes Windows binaries](../../docs/kubernetes-build-win-binaries.md) - shows how to build kubernetes windows binaries for use in a Windows Kubernetes cluster. -* [Hybrid Swarm Mode with Linux and Windows nodes](../../docs/swarmmode-hybrid.md) - shows how to create a hybrid Swarm Mode cluster on Azure. \ No newline at end of file +* [Hybrid Swarm Mode with Linux and Windows nodes](../../docs/swarmmode-hybrid.md) - shows how to create a hybrid Swarm Mode cluster on Azure. + + +## Sample Deployments + +### Kubernetes + +- kubernetes.json - this is the simplest case for a 2-node Windows Kubernetes cluster +- kubernetes-custom-image.json - example using an existing Azure Managed Disk for Windows nodes. For example if you need a prerelease OS version, you can build a VHD, upload it and use this sample. 
+- kubernetes-hybrid.json - example with both Windows & Linux nodes in the same cluster +- kubernetes-wincni.json - example using kubenet plugin on Linux nodes and WinCNI on Windows +- kubernetes-windows-version.json - example of how to build a cluster with a specific Windows patch version \ No newline at end of file diff --git a/examples/windows/kubernetes-wincni.json b/examples/windows/kubernetes-wincni.json new file mode 100644 index 0000000000..874dc59a5a --- /dev/null +++ b/examples/windows/kubernetes-wincni.json @@ -0,0 +1,43 @@ +{ + "apiVersion": "vlabs", + "properties": { + "orchestratorProfile": { + "orchestratorType": "Kubernetes", + "kubernetesConfig": { + "networkPlugin": "kubenet" + } + }, + "masterProfile": { + "count": 1, + "dnsPrefix": "", + "vmSize": "Standard_D2_v2" + }, + "agentPoolProfiles": [ + { + "name": "agentwin", + "count": 2, + "vmSize": "Standard_D2_v2", + "availabilityProfile": "AvailabilitySet", + "osType": "Windows" + } + ], + "windowsProfile": { + "adminUsername": "azureuser", + "adminPassword": "replacepassword1234$" + }, + "linuxProfile": { + "adminUsername": "azureuser", + "ssh": { + "publicKeys": [ + { + "keyData": "" + } + ] + } + }, + "servicePrincipalProfile": { + "clientId": "", + "secret": "" + } + } +} diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 7f721e0989..608491d5a0 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -745,12 +745,11 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu iisPods, err := iisDeploy.Pods() Expect(err).NotTo(HaveOccurred()) Expect(len(iisPods)).ToNot(BeZero()) - // BUG - https://github.com/Azure/acs-engine/issues/3143 - // for _, iisPod := range iisPods { - // pass, err := iisPod.CheckWindowsOutboundConnection(10*time.Second, cfg.Timeout) - // Expect(err).NotTo(HaveOccurred()) - // Expect(pass).To(BeTrue()) - // } + for _, iisPod := range iisPods { + pass, err := iisPod.CheckWindowsOutboundConnection(10*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(pass).To(BeTrue()) + } err = iisDeploy.Delete() Expect(err).NotTo(HaveOccurred()) From 904db6a6f1e9a15c784d760f9e1d4e882c52d5db Mon Sep 17 00:00:00 2001 From: Madhan Raj Mookkandy Date: Fri, 6 Jul 2018 21:34:22 -0700 Subject: [PATCH 74/74] Fix the network blip caused during network creation/deletion (#3438) --- parts/k8s/kuberneteswindowssetup.ps1 | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/parts/k8s/kuberneteswindowssetup.ps1 b/parts/k8s/kuberneteswindowssetup.ps1 index 9bfa37daea..6acf4c4e6e 100644 --- a/parts/k8s/kuberneteswindowssetup.ps1 +++ b/parts/k8s/kuberneteswindowssetup.ps1 @@ -346,6 +346,7 @@ c:\k\kubelet.exe --hostname-override=`$env:computername --pod-infra-container-im `$global:KubeBinariesVersion = "$global:KubeBinariesVersion" `$global:CNIPath = "$global:CNIPath" `$global:NetworkMode = "$global:NetworkMode" +`$global:ExternalNetwork = "ext" `$global:CNIConfig = "$global:CNIConfig" `$global:HNSModule = "$global:HNSModule" `$global:VolumePluginDir = "$global:VolumePluginDir" @@ -362,14 +363,27 @@ Write-Host "NetworkPlugin azure, starting kubelet." netsh advfirewall set allprofiles state off # startup the service -`$hnsNetwork = Get-HnsNetwork | ? Type -EQ `$global:NetworkMode.ToLower() +# Find if the primary external switch network exists. If not create one. +# This is done only once in the lifetime of the node +`$hnsNetwork = Get-HnsNetwork | ? 
Name -EQ `$global:ExternalNetwork +if (!`$hnsNetwork) +{ + Write-Host "Creating a new hns Network" + ipmo `$global:HNSModule + # Fixme : use a smallest range possible, that will not collide with any pod space + New-HNSNetwork -Type `$global:NetworkMode -AddressPrefix "192.168.255.0/30" -Gateway "192.168.255.1" -Name `$global:ExternalNetwork -Verbose +} + +# Find if network created by CNI exists, if yes, remove it +# This is required to keep the network non-persistent behavior +# Going forward, this would be done by HNS automatically during restart of the node +`$hnsNetwork = Get-HnsNetwork | ? Name -EQ $global:KubeNetwork if (`$hnsNetwork) { - # Kubelet has been restarted with existing network. # Cleanup all containers docker ps -q | foreach {docker rm `$_ -f} - # cleanup network + Write-Host "Cleaning up old HNS network found" Remove-HnsNetwork `$hnsNetwork Start-Sleep 10 @@ -377,6 +391,7 @@ if (`$hnsNetwork) remove-item `$cnijson -ErrorAction SilentlyContinue } +# Restart Kubeproxy, which would wait, until the network is created Restart-Service Kubeproxy $KubeletCommandLine @@ -528,11 +543,12 @@ catch `$env:KUBE_NETWORK = "$global:KubeNetwork" `$global:NetworkMode = "$global:NetworkMode" `$global:HNSModule = "$global:HNSModule" -`$hnsNetwork = Get-HnsNetwork | ? Type -EQ `$global:NetworkMode.ToLower() +`$hnsNetwork = Get-HnsNetwork | ? Name -EQ $global:KubeNetwork while (!`$hnsNetwork) { + Write-Host "Waiting for Network [$global:KubeNetwork] to be created . . ." Start-Sleep 10 - `$hnsNetwork = Get-HnsNetwork | ? Type -EQ `$global:NetworkMode.ToLower() + `$hnsNetwork = Get-HnsNetwork | ? Name -EQ $global:KubeNetwork } #
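
For reference while reviewing the template-quoting changes above, the following is a minimal standalone Go sketch (illustrative only, not part of the patch series; the sample input value is made up) of how the "quote" and "shellQuote" helpers registered in pkg/openshift/certgen/unstable/files.go escape a value before it is rendered into master-config.yaml or etcd.conf:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// quote mirrors the "quote" template function from files.go: strconv.Quote
// returns a double-quoted, backslash-escaped literal that is safe to embed
// as a scalar in the generated YAML configs.
func quote(s string) string { return strconv.Quote(s) }

// shellQuote mirrors the "shellQuote" template function from files.go: wrap
// the value in single quotes and escape any embedded single quote as '\''
// so it is safe in a shell-style config file such as etcd.conf.
func shellQuote(s string) string {
	return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'`
}

func main() {
	secret := `pa"ss'word` // hypothetical AAD client secret, for illustration only
	fmt.Println(quote(secret))      // "pa\"ss'word"
	fmt.Println(shellQuote(secret)) // 'pa"ss'\''word'
}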