diff --git a/.circleci/config.yml b/.circleci/config.yml index 77a23304ca..54c0636a51 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -153,30 +153,6 @@ jobs: path: /go/src/github.com/Azure/acs-engine/_logs - store_artifacts: path: /go/src/github.com/Azure/acs-engine/_output - k8s-windows-1.7-release-e2e: - <<: *defaults - steps: - - checkout - - run: | - echo 'export TIMEOUT=30m' >> $BASH_ENV - echo 'export ORCHESTRATOR_RELEASE=1.7' >> $BASH_ENV - echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/windows/definition.json' >> $BASH_ENV - echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV - echo 'export RETAIN_SSH=false' >> $BASH_ENV - echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export CLIENT_ID=${SERVICE_PRINCIPAL_CLIENT_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET_E2E_KUBERNETES}' >> $BASH_ENV - - run: - name: compile - command: make build-binary - - run: - name: ginkgo k8s windows e2e tests - command: make test-kubernetes - no_output_timeout: "30m" - - store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_logs - - store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_output k8s-1.9-release-e2e: <<: *defaults steps: @@ -284,30 +260,6 @@ jobs: path: /go/src/github.com/Azure/acs-engine/_logs - store_artifacts: path: /go/src/github.com/Azure/acs-engine/_output - k8s-windows-1.8-release-e2e: - <<: *defaults - steps: - - checkout - - run: | - echo 'export TIMEOUT=30m' >> $BASH_ENV - echo 'export ORCHESTRATOR_RELEASE=1.8' >> $BASH_ENV - echo 'export CLUSTER_DEFINITION=examples/e2e-tests/kubernetes/windows/definition.json' >> $BASH_ENV - echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV - echo 'export RETAIN_SSH=false' >> $BASH_ENV - echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export 
CLIENT_ID=${SERVICE_PRINCIPAL_CLIENT_ID_E2E_KUBERNETES}' >> $BASH_ENV - echo 'export CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET_E2E_KUBERNETES}' >> $BASH_ENV - - run: - name: compile - command: make build-binary - - run: - name: ginkgo k8s windows e2e tests - command: make test-kubernetes - no_output_timeout: "30m" - - store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_logs - - store_artifacts: - path: /go/src/github.com/Azure/acs-engine/_output k8s-windows-1.9-release-e2e: <<: *defaults steps: @@ -383,7 +335,7 @@ jobs: openshift-3.9-rhel-e2e: working_directory: /go/src/github.com/Azure/acs-engine docker: - - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9 + - image: registry.svc.ci.openshift.org/azure/acs-engine-tests:v3.9 environment: GOPATH: /go steps: @@ -415,7 +367,7 @@ jobs: openshift-3.9-rhel-e2e-vnet: working_directory: /go/src/github.com/Azure/acs-engine docker: - - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9 + - image: registry.svc.ci.openshift.org/azure/acs-engine-tests:v3.9 environment: GOPATH: /go steps: @@ -447,7 +399,7 @@ jobs: openshift-3.9-centos-e2e: working_directory: /go/src/github.com/Azure/acs-engine docker: - - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9 + - image: registry.svc.ci.openshift.org/azure/acs-engine-tests:v3.9 environment: GOPATH: /go steps: @@ -503,24 +455,12 @@ workflows: filters: branches: ignore: master - - k8s-windows-1.7-release-e2e: - requires: - - pr-e2e-hold - filters: - branches: - ignore: master - k8s-1.8-release-e2e: requires: - pr-e2e-hold filters: branches: ignore: master - - k8s-windows-1.8-release-e2e: - requires: - - pr-e2e-hold - filters: - branches: - ignore: master - k8s-windows-1.9-release-e2e: requires: - pr-e2e-hold @@ -605,24 +545,12 @@ workflows: filters: branches: only: master - - k8s-windows-1.7-release-e2e: - requires: - - test - filters: - branches: - only: master - k8s-1.8-release-e2e: requires: - 
test filters: branches: only: master - - k8s-windows-1.8-release-e2e: - requires: - - test - filters: - branches: - only: master - k8s-windows-1.9-release-e2e: requires: - test diff --git a/.codecov/codecov.yml b/.codecov/codecov.yml index 3119a9c399..b61a827687 100644 --- a/.codecov/codecov.yml +++ b/.codecov/codecov.yml @@ -1,6 +1,6 @@ codecov: notify: - require_ci_to_pass: yes + require_ci_to_pass: no coverage: precision: 2 diff --git a/cmd/dcos-upgrade.go b/cmd/dcos-upgrade.go index e250f25cd6..301f6a1efb 100644 --- a/cmd/dcos-upgrade.go +++ b/cmd/dcos-upgrade.go @@ -2,7 +2,6 @@ package cmd import ( "encoding/json" - "fmt" "io/ioutil" "os" "path" @@ -15,6 +14,7 @@ import ( "github.com/Azure/acs-engine/pkg/i18n" "github.com/Azure/acs-engine/pkg/operations/dcosupgrade" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -76,28 +76,28 @@ func (uc *dcosUpgradeCmd) validate(cmd *cobra.Command) error { uc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf("error loading translation files: %s", err.Error()) + return errors.Wrap(err, "error loading translation files") } if len(uc.resourceGroupName) == 0 { cmd.Usage() - return fmt.Errorf("--resource-group must be specified") + return errors.New("--resource-group must be specified") } if len(uc.location) == 0 { cmd.Usage() - return fmt.Errorf("--location must be specified") + return errors.New("--location must be specified") } uc.location = helpers.NormalizeAzureRegion(uc.location) if len(uc.upgradeVersion) == 0 { cmd.Usage() - return fmt.Errorf("--upgrade-version must be specified") + return errors.New("--upgrade-version must be specified") } if len(uc.deploymentDirectory) == 0 { cmd.Usage() - return fmt.Errorf("--deployment-dir must be specified") + return errors.New("--deployment-dir must be specified") } if len(uc.sshPrivateKeyPath) == 0 { @@ -105,11 +105,11 @@ func (uc 
*dcosUpgradeCmd) validate(cmd *cobra.Command) error { } if uc.sshPrivateKey, err = ioutil.ReadFile(uc.sshPrivateKeyPath); err != nil { cmd.Usage() - return fmt.Errorf("ssh-private-key-path must be specified: %s", err) + return errors.Wrap(err, "ssh-private-key-path must be specified") } if err = uc.authArgs.validateAuthArgs(); err != nil { - return fmt.Errorf("%s", err) + return err } return nil } @@ -118,19 +118,19 @@ func (uc *dcosUpgradeCmd) loadCluster(cmd *cobra.Command) error { var err error if uc.client, err = uc.authArgs.getClient(); err != nil { - return fmt.Errorf("Failed to get client: %s", err) + return errors.Wrap(err, "Failed to get client") } _, err = uc.client.EnsureResourceGroup(uc.resourceGroupName, uc.location, nil) if err != nil { - return fmt.Errorf("Error ensuring resource group: %s", err) + return errors.Wrap(err, "Error ensuring resource group") } // load apimodel from the deployment directory apiModelPath := path.Join(uc.deploymentDirectory, "apimodel.json") if _, err = os.Stat(apiModelPath); os.IsNotExist(err) { - return fmt.Errorf("specified api model does not exist (%s)", apiModelPath) + return errors.Errorf("specified api model does not exist (%s)", apiModelPath) } apiloader := &api.Apiloader{ @@ -140,24 +140,24 @@ func (uc *dcosUpgradeCmd) loadCluster(cmd *cobra.Command) error { } uc.containerService, uc.apiVersion, err = apiloader.LoadContainerServiceFromFile(apiModelPath, true, true, nil) if err != nil { - return fmt.Errorf("error parsing the api model: %s", err.Error()) + return errors.Wrap(err, "error parsing the api model") } uc.currentDcosVersion = uc.containerService.Properties.OrchestratorProfile.OrchestratorVersion if uc.currentDcosVersion == uc.upgradeVersion { - return fmt.Errorf("already running DCOS %s", uc.upgradeVersion) + return errors.Errorf("already running DCOS %s", uc.upgradeVersion) } if len(uc.containerService.Location) == 0 { uc.containerService.Location = uc.location } else if uc.containerService.Location != 
uc.location { - return fmt.Errorf("--location does not match api model location") + return errors.New("--location does not match api model location") } // get available upgrades for container service orchestratorInfo, err := api.GetOrchestratorVersionProfile(uc.containerService.Properties.OrchestratorProfile) if err != nil { - return fmt.Errorf("error getting list of available upgrades: %s", err.Error()) + return errors.Wrap(err, "error getting list of available upgrades") } // add the current version if upgrade has failed orchestratorInfo.Upgrades = append(orchestratorInfo.Upgrades, &api.OrchestratorProfile{ @@ -174,7 +174,7 @@ func (uc *dcosUpgradeCmd) loadCluster(cmd *cobra.Command) error { } } if !found { - return fmt.Errorf("upgrade to DCOS %s is not supported", uc.upgradeVersion) + return errors.Errorf("upgrade to DCOS %s is not supported", uc.upgradeVersion) } // Read name suffix to identify nodes in the resource group that belong diff --git a/cmd/deploy.go b/cmd/deploy.go index e47457bf77..e87ee9268f 100644 --- a/cmd/deploy.go +++ b/cmd/deploy.go @@ -1,7 +1,6 @@ package cmd import ( - "errors" "fmt" "io/ioutil" "math/rand" @@ -24,6 +23,7 @@ import ( "github.com/Azure/acs-engine/pkg/i18n" "github.com/Azure/azure-sdk-for-go/arm/graphrbac" "github.com/Azure/go-autorest/autorest/to" + "github.com/pkg/errors" ) const ( @@ -74,10 +74,10 @@ func newDeployCmd() *cobra.Command { Long: deployLongDescription, RunE: func(cmd *cobra.Command, args []string) error { if err := dc.validateArgs(cmd, args); err != nil { - log.Fatalf(fmt.Sprintf("error validating deployCmd: %s", err.Error())) + log.Fatalf("error validating deployCmd: %s", err.Error()) } if err := dc.mergeAPIModel(); err != nil { - log.Fatalf(fmt.Sprintf("error merging API model in deployCmd: %s", err.Error())) + log.Fatalf("error merging API model in deployCmd: %s", err.Error()) } if err := dc.loadAPIModel(cmd, args); err != nil { log.Fatalln("failed to load apimodel: %s", 
err.Error()) @@ -111,7 +111,7 @@ func (dc *deployCmd) validateArgs(cmd *cobra.Command, args []string) error { dc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf(fmt.Sprintf("error loading translation files: %s", err.Error())) + return errors.Wrap(err, "error loading translation files") } if dc.apimodelPath == "" { @@ -119,19 +119,19 @@ func (dc *deployCmd) validateArgs(cmd *cobra.Command, args []string) error { dc.apimodelPath = args[0] } else if len(args) > 1 { cmd.Usage() - return fmt.Errorf(fmt.Sprintf("too many arguments were provided to 'deploy'")) + return errors.New("too many arguments were provided to 'deploy'") } else { cmd.Usage() - return fmt.Errorf(fmt.Sprintf("--api-model was not supplied, nor was one specified as a positional argument")) + return errors.New("--api-model was not supplied, nor was one specified as a positional argument") } } if _, err := os.Stat(dc.apimodelPath); os.IsNotExist(err) { - return fmt.Errorf(fmt.Sprintf("specified api model does not exist (%s)", dc.apimodelPath)) + return errors.Errorf("specified api model does not exist (%s)", dc.apimodelPath) } if dc.location == "" { - return fmt.Errorf(fmt.Sprintf("--location must be specified")) + return errors.New("--location must be specified") } dc.location = helpers.NormalizeAzureRegion(dc.location) @@ -149,7 +149,7 @@ func (dc *deployCmd) mergeAPIModel() error { // overrides the api model and generates a new file dc.apimodelPath, err = transform.MergeValuesWithAPIModel(dc.apimodelPath, m) if err != nil { - return fmt.Errorf(fmt.Sprintf("error merging --set values with the api model: %s", err.Error())) + return errors.Wrap(err, "error merging --set values with the api model") } log.Infoln(fmt.Sprintf("new api model file has been generated during merge: %s", dc.apimodelPath)) @@ -172,7 +172,7 @@ func (dc *deployCmd) loadAPIModel(cmd *cobra.Command, args []string) error { // do not validate when initially loading the apimodel, validation is done later 
after autofilling values dc.containerService, dc.apiVersion, err = apiloader.LoadContainerServiceFromFile(dc.apimodelPath, false, false, nil) if err != nil { - return fmt.Errorf(fmt.Sprintf("error parsing the api model: %s", err.Error())) + return errors.Wrap(err, "error parsing the api model") } if dc.outputDirectory == "" { @@ -190,10 +190,10 @@ func (dc *deployCmd) loadAPIModel(cmd *cobra.Command, args []string) error { if dc.caCertificatePath != "" { if caCertificateBytes, err = ioutil.ReadFile(dc.caCertificatePath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA certificate file: %s", err.Error())) + return errors.Wrap(err, "failed to read CA certificate file") } if caKeyBytes, err = ioutil.ReadFile(dc.caPrivateKeyPath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA private key file: %s", err.Error())) + return errors.Wrap(err, "failed to read CA private key file") } prop := dc.containerService.Properties @@ -207,16 +207,16 @@ func (dc *deployCmd) loadAPIModel(cmd *cobra.Command, args []string) error { if dc.containerService.Location == "" { dc.containerService.Location = dc.location } else if dc.containerService.Location != dc.location { - return fmt.Errorf(fmt.Sprintf("--location does not match api model location")) + return errors.New("--location does not match api model location") } if err = dc.authArgs.validateAuthArgs(); err != nil { - return fmt.Errorf("%s", err) + return err } dc.client, err = dc.authArgs.getClient() if err != nil { - return fmt.Errorf("failed to get client: %s", err.Error()) + return errors.Wrap(err, "failed to get client") } if err = autofillApimodel(dc); err != nil { @@ -239,11 +239,11 @@ func autofillApimodel(dc *deployCmd) error { } if dc.dnsPrefix != "" && dc.containerService.Properties.MasterProfile.DNSPrefix != "" { - return fmt.Errorf("invalid configuration: the apimodel masterProfile.dnsPrefix and --dns-prefix were both specified") + return errors.New("invalid configuration: the apimodel 
masterProfile.dnsPrefix and --dns-prefix were both specified") } if dc.containerService.Properties.MasterProfile.DNSPrefix == "" { if dc.dnsPrefix == "" { - return fmt.Errorf("apimodel: missing masterProfile.dnsPrefix and --dns-prefix was not specified") + return errors.New("apimodel: missing masterProfile.dnsPrefix and --dns-prefix was not specified") } log.Warnf("apimodel: missing masterProfile.dnsPrefix will use %q", dc.dnsPrefix) dc.containerService.Properties.MasterProfile.DNSPrefix = dc.dnsPrefix @@ -259,7 +259,7 @@ func autofillApimodel(dc *deployCmd) error { } if _, err := os.Stat(dc.outputDirectory); !dc.forceOverwrite && err == nil { - return fmt.Errorf("Output directory already exists and forceOverwrite flag is not set: %s", dc.outputDirectory) + return errors.Errorf("Output directory already exists and forceOverwrite flag is not set: %s", dc.outputDirectory) } if dc.resourceGroup == "" { @@ -267,7 +267,7 @@ func autofillApimodel(dc *deployCmd) error { log.Warnf("--resource-group was not specified. Using the DNS prefix from the apimodel as the resource group name: %s", dnsPrefix) dc.resourceGroup = dnsPrefix if dc.location == "" { - return fmt.Errorf("--resource-group was not specified. --location must be specified in case the resource group needs creation") + return errors.New("--resource-group was not specified. 
--location must be specified in case the resource group needs creation") } } @@ -279,7 +279,7 @@ func autofillApimodel(dc *deployCmd) error { } _, publicKey, err := acsengine.CreateSaveSSH(dc.containerService.Properties.LinuxProfile.AdminUsername, dc.outputDirectory, translator) if err != nil { - return fmt.Errorf("Failed to generate SSH Key: %s", err.Error()) + return errors.Wrap(err, "Failed to generate SSH Key") } dc.containerService.Properties.LinuxProfile.SSH.PublicKeys = []api.PublicKey{{KeyData: publicKey}} @@ -321,7 +321,7 @@ func autofillApimodel(dc *deployCmd) error { } applicationID, servicePrincipalObjectID, secret, err := dc.client.CreateApp(appName, appURL, replyURLs, requiredResourceAccess) if err != nil { - return fmt.Errorf("apimodel invalid: ServicePrincipalProfile was empty, and we failed to create valid credentials: %q", err) + return errors.Wrap(err, "apimodel invalid: ServicePrincipalProfile was empty, and we failed to create valid credentials") } log.Warnf("created application with applicationID (%s) and servicePrincipalObjectID (%s).", applicationID, servicePrincipalObjectID) @@ -329,7 +329,7 @@ func autofillApimodel(dc *deployCmd) error { err = dc.client.CreateRoleAssignmentSimple(dc.resourceGroup, servicePrincipalObjectID) if err != nil { - return fmt.Errorf("apimodel: could not create or assign ServicePrincipal: %q", err) + return errors.Wrap(err, "apimodel: could not create or assign ServicePrincipal") } diff --git a/cmd/generate.go b/cmd/generate.go index 694163696c..0867ff321a 100644 --- a/cmd/generate.go +++ b/cmd/generate.go @@ -1,7 +1,6 @@ package cmd import ( - "errors" "fmt" "io/ioutil" "os" @@ -12,6 +11,7 @@ import ( "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/acs-engine/pkg/i18n" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -80,7 +80,7 @@ func (gc *generateCmd) validate(cmd *cobra.Command, 
args []string) error { gc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf(fmt.Sprintf("error loading translation files: %s", err.Error())) + return errors.Wrap(err, "error loading translation files") } if gc.apimodelPath == "" { @@ -96,7 +96,7 @@ func (gc *generateCmd) validate(cmd *cobra.Command, args []string) error { } if _, err := os.Stat(gc.apimodelPath); os.IsNotExist(err) { - return fmt.Errorf(fmt.Sprintf("specified api model does not exist (%s)", gc.apimodelPath)) + return errors.Errorf("specified api model does not exist (%s)", gc.apimodelPath) } return nil @@ -113,7 +113,7 @@ func (gc *generateCmd) mergeAPIModel() error { // overrides the api model and generates a new file gc.apimodelPath, err = transform.MergeValuesWithAPIModel(gc.apimodelPath, m) if err != nil { - return fmt.Errorf(fmt.Sprintf("error merging --set values with the api model: %s", err.Error())) + return errors.Wrap(err, "error merging --set values with the api model") } log.Infoln(fmt.Sprintf("new api model file has been generated during merge: %s", gc.apimodelPath)) @@ -134,7 +134,7 @@ func (gc *generateCmd) loadAPIModel(cmd *cobra.Command, args []string) error { } gc.containerService, gc.apiVersion, err = apiloader.LoadContainerServiceFromFile(gc.apimodelPath, true, false, nil) if err != nil { - return fmt.Errorf(fmt.Sprintf("error parsing the api model: %s", err.Error())) + return errors.Wrap(err, "error parsing the api model") } if gc.outputDirectory == "" { @@ -152,10 +152,10 @@ func (gc *generateCmd) loadAPIModel(cmd *cobra.Command, args []string) error { } if gc.caCertificatePath != "" { if caCertificateBytes, err = ioutil.ReadFile(gc.caCertificatePath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA certificate file: %s", err.Error())) + return errors.Wrap(err, "failed to read CA certificate file") } if caKeyBytes, err = ioutil.ReadFile(gc.caPrivateKeyPath); err != nil { - return fmt.Errorf(fmt.Sprintf("failed to read CA private key file: 
%s", err.Error())) + return errors.Wrap(err, "failed to read CA private key file") } prop := gc.containerService.Properties diff --git a/cmd/root.go b/cmd/root.go index 50b4f23816..d6b2dd6686 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,10 +1,11 @@ package cmd import ( - "fmt" + "os" "github.com/Azure/acs-engine/pkg/armhelpers" "github.com/Azure/go-autorest/autorest/azure" + "github.com/pkg/errors" uuid "github.com/satori/go.uuid" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -44,6 +45,7 @@ func NewRootCmd() *cobra.Command { rootCmd.AddCommand(newUpgradeCmd()) rootCmd.AddCommand(newScaleCmd()) rootCmd.AddCommand(newDcosUpgradeCmd()) + rootCmd.AddCommand(getCompletionCmd(rootCmd)) return rootCmd } @@ -79,22 +81,22 @@ func (authArgs *authArgs) validateAuthArgs() error { if authArgs.AuthMethod == "client_secret" { if authArgs.ClientID.String() == "00000000-0000-0000-0000-000000000000" || authArgs.ClientSecret == "" { - return fmt.Errorf(`--client-id and --client-secret must be specified when --auth-method="client_secret"`) + return errors.New(`--client-id and --client-secret must be specified when --auth-method="client_secret"`) } // try parse the UUID } else if authArgs.AuthMethod == "client_certificate" { if authArgs.ClientID.String() == "00000000-0000-0000-0000-000000000000" || authArgs.CertificatePath == "" || authArgs.PrivateKeyPath == "" { - return fmt.Errorf(`--client-id and --certificate-path, and --private-key-path must be specified when --auth-method="client_certificate"`) + return errors.New(`--client-id and --certificate-path, and --private-key-path must be specified when --auth-method="client_certificate"`) } } if authArgs.SubscriptionID.String() == "00000000-0000-0000-0000-000000000000" { - return fmt.Errorf("--subscription-id is required (and must be a valid UUID)") + return errors.New("--subscription-id is required (and must be a valid UUID)") } _, err := 
azure.EnvironmentFromName(authArgs.RawAzureEnvironment) if err != nil { - return fmt.Errorf("failed to parse --azure-env as a valid target Azure cloud environment") + return errors.New("failed to parse --azure-env as a valid target Azure cloud environment") } return nil } @@ -113,7 +115,7 @@ func (authArgs *authArgs) getClient() (*armhelpers.AzureClient, error) { case "client_certificate": client, err = armhelpers.NewAzureClientWithClientCertificateFile(env, authArgs.SubscriptionID.String(), authArgs.ClientID.String(), authArgs.CertificatePath, authArgs.PrivateKeyPath) default: - return nil, fmt.Errorf("--auth-method: ERROR: method unsupported. method=%q", authArgs.AuthMethod) + return nil, errors.Errorf("--auth-method: ERROR: method unsupported. method=%q", authArgs.AuthMethod) } if err != nil { return nil, err @@ -125,3 +127,23 @@ func (authArgs *authArgs) getClient() (*armhelpers.AzureClient, error) { client.AddAcceptLanguages([]string{authArgs.language}) return client, nil } + +func getCompletionCmd(root *cobra.Command) *cobra.Command { + var completionCmd = &cobra.Command{ + Use: "completion", + Short: "Generates bash completion scripts", + Long: `To load completion run + + source <(acs-engine completion) + + To configure your bash shell to load completions for each session, add this to your bashrc + + # ~/.bashrc or ~/.profile + source <(acs-engine completion) + `, + Run: func(cmd *cobra.Command, args []string) { + root.GenBashCompletion(os.Stdout) + }, + } + return completionCmd +} diff --git a/cmd/root_test.go b/cmd/root_test.go new file mode 100644 index 0000000000..fb63c7ed49 --- /dev/null +++ b/cmd/root_test.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "testing" + + "github.com/spf13/cobra" +) + +func TestNewRootCmd(t *testing.T) { + output := NewRootCmd() + if output.Use != rootName || output.Short != rootShortDescription || output.Long != rootLongDescription { + t.Fatalf("root command should have use %s equal %s, short %s equal %s and long %s 
equal to %s", output.Use, rootName, output.Short, rootShortDescription, output.Long, rootLongDescription) + } + expectedCommands := []*cobra.Command{getCompletionCmd(output), newDcosUpgradeCmd(), newDeployCmd(), newGenerateCmd(), newOrchestratorsCmd(), newScaleCmd(), newUpgradeCmd(), newVersionCmd()} + rc := output.Commands() + for i, c := range expectedCommands { + if rc[i].Use != c.Use { + t.Fatalf("root command should have command %s", c.Use) + } + } +} diff --git a/cmd/scale.go b/cmd/scale.go index df9d39999e..63c69bb043 100644 --- a/cmd/scale.go +++ b/cmd/scale.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/acs-engine/pkg/openshift/filesystem" "github.com/Azure/acs-engine/pkg/operations" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -90,29 +91,29 @@ func (sc *scaleCmd) validate(cmd *cobra.Command) error { sc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf("error loading translation files: %s", err.Error()) + return errors.Wrap(err, "error loading translation files") } if sc.resourceGroupName == "" { cmd.Usage() - return fmt.Errorf("--resource-group must be specified") + return errors.New("--resource-group must be specified") } if sc.location == "" { cmd.Usage() - return fmt.Errorf("--location must be specified") + return errors.New("--location must be specified") } sc.location = helpers.NormalizeAzureRegion(sc.location) if sc.newDesiredAgentCount == 0 { cmd.Usage() - return fmt.Errorf("--new-node-count must be specified") + return errors.New("--new-node-count must be specified") } if sc.deploymentDirectory == "" { cmd.Usage() - return fmt.Errorf("--deployment-dir must be specified") + return errors.New("--deployment-dir must be specified") } return nil @@ -127,7 +128,7 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { } if sc.client, err = sc.authArgs.getClient(); err != nil { - return fmt.Errorf("failed 
to get client: %s", err.Error()) + return errors.Wrap(err, "failed to get client") } _, err = sc.client.EnsureResourceGroup(sc.resourceGroupName, sc.location, nil) @@ -139,7 +140,7 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { sc.apiModelPath = path.Join(sc.deploymentDirectory, "apimodel.json") if _, err = os.Stat(sc.apiModelPath); os.IsNotExist(err) { - return fmt.Errorf("specified api model does not exist (%s)", sc.apiModelPath) + return errors.Errorf("specified api model does not exist (%s)", sc.apiModelPath) } apiloader := &api.Apiloader{ @@ -149,25 +150,25 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { } sc.containerService, sc.apiVersion, err = apiloader.LoadContainerServiceFromFile(sc.apiModelPath, true, true, nil) if err != nil { - return fmt.Errorf("error parsing the api model: %s", err.Error()) + return errors.Wrap(err, "error parsing the api model") } if sc.containerService.Location == "" { sc.containerService.Location = sc.location } else if sc.containerService.Location != sc.location { - return fmt.Errorf("--location does not match api model location") + return errors.New("--location does not match api model location") } if sc.agentPoolToScale == "" { agentPoolCount := len(sc.containerService.Properties.AgentPoolProfiles) if agentPoolCount > 1 { - return fmt.Errorf("--node-pool is required if more than one agent pool is defined in the container service") + return errors.New("--node-pool is required if more than one agent pool is defined in the container service") } else if agentPoolCount == 1 { sc.agentPool = sc.containerService.Properties.AgentPoolProfiles[0] sc.agentPoolIndex = 0 sc.agentPoolToScale = sc.containerService.Properties.AgentPoolProfiles[0].Name } else { - return fmt.Errorf("No node pools found to scale") + return errors.New("No node pools found to scale") } } else { agentPoolIndex := -1 @@ -179,7 +180,7 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { } } if agentPoolIndex == -1 { - return fmt.Errorf("node pool 
%s was not found in the deployed api model", sc.agentPoolToScale) + return errors.Errorf("node pool %s was not found in the deployed api model", sc.agentPoolToScale) } } @@ -200,10 +201,10 @@ func (sc *scaleCmd) load(cmd *cobra.Command) error { func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { if err := sc.validate(cmd); err != nil { - return fmt.Errorf("failed to validate scale command: %s", err.Error()) + return errors.Wrap(err, "failed to validate scale command") } if err := sc.load(cmd); err != nil { - return fmt.Errorf("failed to load existing container service: %s", err.Error()) + return errors.Wrap(err, "failed to load existing container service") } orchestratorInfo := sc.containerService.Properties.OrchestratorProfile @@ -215,9 +216,9 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { //TODO handle when there is a nextLink in the response and get more nodes vms, err := sc.client.ListVirtualMachines(sc.resourceGroupName) if err != nil { - return fmt.Errorf("failed to get vms in the resource group. 
Error: %s", err.Error()) + return errors.Wrap(err, "failed to get vms in the resource group") } else if len(*vms.Value) < 1 { - return fmt.Errorf("The provided resource group does not contain any vms") + return errors.New("The provided resource group does not contain any vms") } for _, vm := range *vms.Value { vmTags := *vm.Tags @@ -254,7 +255,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { if currentNodeCount > sc.newDesiredAgentCount { if sc.masterFQDN == "" { cmd.Usage() - return fmt.Errorf("master-FQDN is required to scale down a kubernetes cluster's agent pool") + return errors.New("master-FQDN is required to scale down a kubernetes cluster's agent pool") } vmsToDelete := make([]string, 0) @@ -266,39 +267,43 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { case api.Kubernetes: kubeConfig, err := acsengine.GenerateKubeConfig(sc.containerService.Properties, sc.location) if err != nil { - return fmt.Errorf("failed to generate kube config: %v", err) + return errors.Wrap(err, "failed to generate kube config") } err = sc.drainNodes(kubeConfig, vmsToDelete) if err != nil { - return fmt.Errorf("Got error %+v, while draining the nodes to be deleted", err) + return errors.Wrap(err, "Got error while draining the nodes to be deleted") } case api.OpenShift: bundle := bytes.NewReader(sc.containerService.Properties.OrchestratorProfile.OpenShiftConfig.ConfigBundles["master"]) fs, err := filesystem.NewTGZReader(bundle) if err != nil { - return fmt.Errorf("failed to read master bundle: %v", err) + return errors.Wrap(err, "failed to read master bundle") } kubeConfig, err := fs.ReadFile("etc/origin/master/admin.kubeconfig") if err != nil { - return fmt.Errorf("failed to read kube config: %v", err) + return errors.Wrap(err, "failed to read kube config") } err = sc.drainNodes(string(kubeConfig), vmsToDelete) if err != nil { - return fmt.Errorf("Got error %v, while draining the nodes to be deleted", err) + return errors.Wrap(err, "Got 
error while draining the nodes to be deleted") } } errList := operations.ScaleDownVMs(sc.client, sc.logger, sc.SubscriptionID.String(), sc.resourceGroupName, vmsToDelete...) if errList != nil { - errorMessage := "" + var err error + format := "Node '%s' failed to delete with error: '%s'" for element := errList.Front(); element != nil; element = element.Next() { vmError, ok := element.Value.(*operations.VMScalingErrorDetails) if ok { - error := fmt.Sprintf("Node '%s' failed to delete with error: '%s'", vmError.Name, vmError.Error.Error()) - errorMessage = errorMessage + error + if err == nil { + err = errors.Errorf(format, vmError.Name, vmError.Error.Error()) + } else { + err = errors.Wrapf(err, format, vmError.Name, vmError.Error.Error()) + } } } - return fmt.Errorf(errorMessage) + return err } return nil @@ -306,7 +311,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { } else { vmssList, err := sc.client.ListVirtualMachineScaleSets(sc.resourceGroupName) if err != nil { - return fmt.Errorf("failed to get vmss list in the resource group. 
Error: %s", err.Error()) + return errors.Wrap(err, "failed to get vmss list in the resource group") } for _, vmss := range *vmssList.Value { vmTags := *vmss.Tags @@ -321,6 +326,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { osPublisher := *vmss.VirtualMachineProfile.StorageProfile.ImageReference.Publisher if strings.EqualFold(osPublisher, "MicrosoftWindowsServer") { _, _, winPoolIndex, err = utils.WindowsVMSSNameParts(*vmss.Name) + log.Errorln(err) } currentNodeCount = int(*vmss.Sku.Capacity) @@ -335,18 +341,18 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { } templateGenerator, err := acsengine.InitializeTemplateGenerator(ctx, sc.classicMode) if err != nil { - return fmt.Errorf("failed to initialize template generator: %s", err.Error()) + return errors.Wrap(err, "failed to initialize template generator") } sc.containerService.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{sc.agentPool} template, parameters, _, err := templateGenerator.GenerateTemplate(sc.containerService, acsengine.DefaultGeneratorCode, false, BuildTag) if err != nil { - return fmt.Errorf("error generating template %s: %s", sc.apiModelPath, err.Error()) + return errors.Wrapf(err, "error generating template %s", sc.apiModelPath) } if template, err = transform.PrettyPrintArmTemplate(template); err != nil { - return fmt.Errorf("error pretty printing template: %s", err.Error()) + return errors.Wrap(err, "error pretty printing template") } templateJSON := make(map[string]interface{}) @@ -354,12 +360,12 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { err = json.Unmarshal([]byte(template), &templateJSON) if err != nil { - return err + return errors.Wrap(err, "error unmarshaling template") } err = json.Unmarshal([]byte(parameters), &parametersJSON) if err != nil { - return err + return errors.Wrap(err, "error unmarshalling parameters") } transformer := transform.Transformer{Translator: ctx.Translator} @@ -378,7 +384,7 @@ func (sc 
*scaleCmd) run(cmd *cobra.Command, args []string) error { case api.OpenShift: err = transformer.NormalizeForOpenShiftVMASScalingUp(sc.logger, sc.agentPool.Name, templateJSON) if err != nil { - return fmt.Errorf("error tranforming the template for scaling template %s: %v", sc.apiModelPath, err) + return errors.Wrapf(err, "error transforming the template for scaling template %s", sc.apiModelPath) } if sc.agentPool.IsAvailabilitySets() { addValue(parametersJSON, fmt.Sprintf("%sOffset", sc.agentPool.Name), highestUsedIndex+1) @@ -386,7 +392,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { case api.Kubernetes: err = transformer.NormalizeForK8sVMASScalingUp(sc.logger, templateJSON) if err != nil { - return fmt.Errorf("error tranforming the template for scaling template %s: %s", sc.apiModelPath, err.Error()) + return errors.Wrapf(err, "error transforming the template for scaling template %s", sc.apiModelPath) } if sc.agentPool.IsAvailabilitySets() { addValue(parametersJSON, fmt.Sprintf("%sOffset", sc.agentPool.Name), highestUsedIndex+1) @@ -395,7 +401,7 @@ func (sc *scaleCmd) run(cmd *cobra.Command, args []string) error { case api.SwarmMode: case api.DCOS: if sc.agentPool.IsAvailabilitySets() { - return fmt.Errorf("scaling isn't supported for orchestrator %s, with availability sets", orchestratorInfo.OrchestratorType) + return errors.Errorf("scaling isn't supported for orchestrator %q, with availability sets", orchestratorInfo.OrchestratorType) } transformer.NormalizeForVMSSScaling(sc.logger, templateJSON) } @@ -472,7 +478,7 @@ func (sc *scaleCmd) drainNodes(kubeConfig string, vmsToDelete []string) error { for i := 0; i < numVmsToDrain; i++ { errDetails := <-errChan if errDetails != nil { - return fmt.Errorf("Node %q failed to drain with error: %v", errDetails.Name, errDetails.Error) + return errors.Wrapf(errDetails.Error, "Node %q failed to drain with error", errDetails.Name) } } diff --git a/cmd/upgrade.go b/cmd/upgrade.go index 
36e458abca..a9218509d9 100644 --- a/cmd/upgrade.go +++ b/cmd/upgrade.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/acs-engine/pkg/i18n" "github.com/Azure/acs-engine/pkg/operations/kubernetesupgrade" "github.com/leonelquinteros/gotext" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -77,17 +78,17 @@ func (uc *upgradeCmd) validate(cmd *cobra.Command) error { uc.locale, err = i18n.LoadTranslations() if err != nil { - return fmt.Errorf("error loading translation files: %s", err.Error()) + return errors.Wrap(err, "error loading translation files") } if uc.resourceGroupName == "" { cmd.Usage() - return fmt.Errorf("--resource-group must be specified") + return errors.New("--resource-group must be specified") } if uc.location == "" { cmd.Usage() - return fmt.Errorf("--location must be specified") + return errors.New("--location must be specified") } uc.location = helpers.NormalizeAzureRegion(uc.location) @@ -99,12 +100,12 @@ func (uc *upgradeCmd) validate(cmd *cobra.Command) error { // TODO(colemick): add in the cmd annotation to help enable autocompletion if uc.upgradeVersion == "" { cmd.Usage() - return fmt.Errorf("--upgrade-version must be specified") + return errors.New("--upgrade-version must be specified") } if uc.deploymentDirectory == "" { cmd.Usage() - return fmt.Errorf("--deployment-dir must be specified") + return errors.New("--deployment-dir must be specified") } return nil } @@ -113,23 +114,23 @@ func (uc *upgradeCmd) loadCluster(cmd *cobra.Command) error { var err error if err = uc.authArgs.validateAuthArgs(); err != nil { - return fmt.Errorf("%s", err.Error()) + return err } if uc.client, err = uc.authArgs.getClient(); err != nil { - return fmt.Errorf("Failed to get client: %s", err.Error()) + return errors.Wrap(err, "Failed to get client") } _, err = uc.client.EnsureResourceGroup(uc.resourceGroupName, uc.location, nil) if err != nil { - return fmt.Errorf("Error ensuring 
resource group: %s", err.Error()) + return errors.Wrap(err, "Error ensuring resource group") } // load apimodel from the deployment directory apiModelPath := path.Join(uc.deploymentDirectory, "apimodel.json") if _, err = os.Stat(apiModelPath); os.IsNotExist(err) { - return fmt.Errorf("specified api model does not exist (%s)", apiModelPath) + return errors.Errorf("specified api model does not exist (%s)", apiModelPath) } apiloader := &api.Apiloader{ @@ -139,19 +140,19 @@ func (uc *upgradeCmd) loadCluster(cmd *cobra.Command) error { } uc.containerService, uc.apiVersion, err = apiloader.LoadContainerServiceFromFile(apiModelPath, true, true, nil) if err != nil { - return fmt.Errorf("error parsing the api model: %s", err.Error()) + return errors.Wrap(err, "error parsing the api model") } if uc.containerService.Location == "" { uc.containerService.Location = uc.location } else if uc.containerService.Location != uc.location { - return fmt.Errorf("--location does not match api model location") + return errors.New("--location does not match api model location") } // get available upgrades for container service orchestratorInfo, err := api.GetOrchestratorVersionProfile(uc.containerService.Properties.OrchestratorProfile) if err != nil { - return fmt.Errorf("error getting list of available upgrades: %s", err.Error()) + return errors.Wrap(err, "error getting list of available upgrades") } // add the current version if upgrade has failed orchestratorInfo.Upgrades = append(orchestratorInfo.Upgrades, &api.OrchestratorProfile{ @@ -168,7 +169,7 @@ func (uc *upgradeCmd) loadCluster(cmd *cobra.Command) error { } } if !found { - return fmt.Errorf("version %s is not supported", uc.upgradeVersion) + return errors.Errorf("version %s is not supported", uc.upgradeVersion) } // Read name suffix to identify nodes in the resource group that belong diff --git a/docs/acsengine.md b/docs/acsengine.md index 89ab53bb14..27c5ccd331 100644 --- a/docs/acsengine.md +++ b/docs/acsengine.md @@ -12,6 
+12,14 @@ You can also choose to install acs-engine using [gofish](https://www.gofi.sh/#ab If you would prefer to build `acs-engine` from source or you are interested in contributing to `acs-engine` see [building from source](#build-acs-engine-from-source) below. +## Completion + +`acs-engine` supports bash completion. To enable this, add the following to your `.bashrc` or `~/.profile` + +```bash +source <(acs-engine completion) +``` + ## Usage `acs-engine` reads a JSON [cluster definition](./clusterdefinition.md) and generates a number of files that may be submitted to Azure Resource Manager (ARM). The generated files include: diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 29de840659..6b4e339eaf 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -6,70 +6,73 @@ Here are the cluster definitions for apiVersion "vlabs": ### apiVersion -|Name|Required|Description| -|---|---|---| -|apiVersion|yes|The version of the template. For "vlabs" the value is "vlabs"| +| Name | Required | Description | +| ---------- | -------- | ------------------------------------------------------------- | +| apiVersion | yes | The version of the template. For "vlabs" the value is "vlabs" | ### orchestratorProfile + `orchestratorProfile` describes the orchestrator settings. -|Name|Required|Description| -|---|---|---| -|orchestratorType|yes|Specifies the orchestrator type for the cluster| +| Name | Required | Description | +| ---------------- | -------- | ----------------------------------------------- | +| orchestratorType | yes | Specifies the orchestrator type for the cluster | Here are the valid values for the orchestrator types: -1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). [Older releases of DCOS 1.8 may be specified](../examples/dcos-releases). -2. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). -3. `Swarm` - this represents the [Swarm orchestrator](swarm.md). -4. 
`Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). -5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md). +1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). [Older releases of DCOS 1.8 may be specified](../examples/dcos-releases). +2. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). +3. `Swarm` - this represents the [Swarm orchestrator](swarm.md). +4. `Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). +5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md). ### kubernetesConfig `kubernetesConfig` describes Kubernetes specific configuration. -|Name|Required|Description| -|---|---|---| -|addons|no|Configure various Kubernetes addons configuration (currently supported: tiller, kubernetes-dashboard). See `addons` configuration below| -|apiServerConfig|no|Configure various runtime configuration for apiserver. See `apiServerConfig` [below](#feat-apiserver-config)| -|cloudControllerManagerConfig|no|Configure various runtime configuration for cloud-controller-manager. See `cloudControllerManagerConfig` [below](#feat-cloud-controller-manager-config)| -|clusterSubnet|no|The IP subnet used for allocating IP addresses for pod network interfaces. The subnet must be in the VNET address space. Default value is 10.244.0.0/16| -|containerRuntime|no|The container runtime to use as a backend. The default is `docker`. The other options are `clear-containers` and `containerd`| -|controllerManagerConfig|no|Configure various runtime configuration for controller-manager. See `controllerManagerConfig` [below](#feat-controller-manager-config)| -|customWindowsPackageURL|no|Configure custom windows Kubernetes release package URL for deployment on Windows| -|dnsServiceIP|no|IP address for kube-dns to listen on. 
If specified must be in the range of `serviceCidr`| -|dockerBridgeSubnet|no|The specific IP and subnet used for allocating IP addresses for the docker bridge network created on the kubernetes master and agents. Default value is 172.17.0.1/16. This value is used to configure the docker daemon using the [--bip flag](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0)| -|dockerEngineVersion|no|Which version of docker-engine to use in your cluster, e.g. "17.03.*"| -|enableAggregatedAPIs|no|Enable [Kubernetes Aggregated APIs](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/).This is required by [Service Catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md). (boolean - default == false) | -|enableDataEncryptionAtRest|no|Enable [kubernetes data encryption at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | -|enableEncryptionWithExternalKms|no|Enable [kubernetes data encryption at rest with external KMS](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | -|enablePodSecurityPolicy|no|Enable [kubernetes pod security policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).This is currently a beta feature. (boolean - default == false)| -|enableRbac|no|Enable [Kubernetes RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) (boolean - default == true) | -|etcdDiskSizeGB|no|Size in GB to assign to etcd data volume. Defaults (if no user value provided) are: 256 GB for clusters up to 3 nodes; 512 GB for clusters with between 4 and 10 nodes; 1024 GB for clusters with between 11 and 20 nodes; and 2048 GB for clusters with more than 20 nodes| -|etcdEncryptionKey|no|Enryption key to be used if enableDataEncryptionAtRest is enabled. 
Defaults to a random, generated, key| -|gcHighThreshold|no|Sets the --image-gc-high-threshold value on the kublet configuration. Default is 85. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | -|gcLowThreshold|no|Sets the --image-gc-low-threshold value on the kublet configuration. Default is 80. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | -|kubeletConfig|no|Configure various runtime configuration for kubelet. See `kubeletConfig` [below](#feat-kubelet-config)| -|kubernetesImageBase|no|Specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s-gcrio.azureedge.net/`| -|networkPlugin|no|Specifies the network plugin implementation for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience
`"kubenet"` for k8s software networking implementation.
`"flannel"` for using CoreOS Flannel
`"cilium"` for using the default Cilium CNI IPAM | -|networkPolicy|no|Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`calico` for Calico network policy.
`cilium` for cilium network policy (Lin).
See [network policy examples](../examples/networkpolicy) for more information| -|privateCluster|no|Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster).| -|schedulerConfig|no|Configure various runtime configuration for scheduler. See `schedulerConfig` [below](#feat-scheduler-config)| -|serviceCidr|no|IP range for Service IPs, Default is "10.0.0.0/16". This range is never routed outside of a node so does not need to lie within clusterSubnet or the VNET| -|useInstanceMetadata|no|Use the Azure cloudprovider instance metadata service for appropriate resource discovery operations. Default is `true`| -|useManagedIdentity|no| Includes and uses MSI identities for all interactions with the Azure Resource Manager (ARM) API. Instead of using a static service principal written to /etc/kubernetes/azure.json, Kubernetes will use a dynamic, time-limited token fetched from the MSI extension running on master and agent nodes. This support is currently alpha and requires Kubernetes v1.9.1 or newer. (boolean - default == false) | +| Name | Required | Description | +| ------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| addons | no | Configure various Kubernetes addons configuration (currently supported: tiller, kubernetes-dashboard). See `addons` configuration below | +| apiServerConfig | no | Configure various runtime configuration for apiserver. See `apiServerConfig` [below](#feat-apiserver-config) | +| cloudControllerManagerConfig | no | Configure various runtime configuration for cloud-controller-manager. 
See `cloudControllerManagerConfig` [below](#feat-cloud-controller-manager-config) | +| clusterSubnet | no | The IP subnet used for allocating IP addresses for pod network interfaces. The subnet must be in the VNET address space. Default value is 10.244.0.0/16 | +| containerRuntime | no | The container runtime to use as a backend. The default is `docker`. The other options are `clear-containers` and `containerd` | +| controllerManagerConfig | no | Configure various runtime configuration for controller-manager. See `controllerManagerConfig` [below](#feat-controller-manager-config) | +| customWindowsPackageURL | no | Configure custom windows Kubernetes release package URL for deployment on Windows | +| dnsServiceIP | no | IP address for kube-dns to listen on. If specified must be in the range of `serviceCidr` | +| dockerBridgeSubnet | no | The specific IP and subnet used for allocating IP addresses for the docker bridge network created on the kubernetes master and agents. Default value is 172.17.0.1/16. This value is used to configure the docker daemon using the [--bip flag](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0) | +| dockerEngineVersion | no | Which version of docker-engine to use in your cluster, e.g. "17.03.\*" | +| enableAggregatedAPIs | no | Enable [Kubernetes Aggregated APIs](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/).This is required by [Service Catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md). (boolean - default is true for k8s versions greater or equal to 1.9.0, false otherwise) | +| enableDataEncryptionAtRest | no | Enable [kubernetes data encryption at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. 
(boolean - default == false) | +| enableEncryptionWithExternalKms | no | Enable [kubernetes data encryption at rest with external KMS](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).This is currently an alpha feature. (boolean - default == false) | +| enablePodSecurityPolicy | no | Enable [kubernetes pod security policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/).This is currently a beta feature. (boolean - default == false) | +| enableRbac | no | Enable [Kubernetes RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) (boolean - default == true) | +| etcdDiskSizeGB | no | Size in GB to assign to etcd data volume. Defaults (if no user value provided) are: 256 GB for clusters up to 3 nodes; 512 GB for clusters with between 4 and 10 nodes; 1024 GB for clusters with between 11 and 20 nodes; and 2048 GB for clusters with more than 20 nodes | +| etcdEncryptionKey | no | Enryption key to be used if enableDataEncryptionAtRest is enabled. Defaults to a random, generated, key | +| gcHighThreshold | no | Sets the --image-gc-high-threshold value on the kublet configuration. Default is 85. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | +| gcLowThreshold | no | Sets the --image-gc-low-threshold value on the kublet configuration. Default is 80. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) | +| kubeletConfig | no | Configure various runtime configuration for kubelet. See `kubeletConfig` [below](#feat-kubelet-config) | +| kubernetesImageBase | no | Specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s-gcrio.azureedge.net/` | +| networkPlugin | no | Specifies the network plugin implementation for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience
`"kubenet"` for k8s software networking implementation.
`"flannel"` for using CoreOS Flannel
`"cilium"` for using the default Cilium CNI IPAM | +| networkPolicy | no | Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`"calico"` for Calico network policy.
`"cilium"` for cilium network policy (Lin), and `"azure"` (experimental) for Azure CNI-compliant network policy (note: Azure CNI-compliant network policy requires explicit `"networkPlugin": "azure"` configuration as well).
See [network policy examples](../examples/networkpolicy) for more information. | +| privateCluster | no | Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster). | +| schedulerConfig | no | Configure various runtime configuration for scheduler. See `schedulerConfig` [below](#feat-scheduler-config) | +| serviceCidr | no | IP range for Service IPs, Default is "10.0.0.0/16". This range is never routed outside of a node so does not need to lie within clusterSubnet or the VNET | +| useInstanceMetadata | no | Use the Azure cloudprovider instance metadata service for appropriate resource discovery operations. Default is `true` | +| useManagedIdentity | no | Includes and uses MSI identities for all interactions with the Azure Resource Manager (ARM) API. Instead of using a static service principal written to /etc/kubernetes/azure.json, Kubernetes will use a dynamic, time-limited token fetched from the MSI extension running on master and agent nodes. This support is currently alpha and requires Kubernetes v1.9.1 or newer. (boolean - default == false) | #### addons `addons` describes various addons configuration. It is a child property of `kubernetesConfig`. Below is a list of currently available addons: -|Name of addon|Enabled by default?|How many containers|Description| -|---|---|---|---| -|tiller|true|1|Delivers the Helm server-side component: tiller. See https://github.com/kubernetes/helm for more info| -|kubernetes-dashboard|true|1|Delivers the kubernetes dashboard component. See https://github.com/kubernetes/dashboard for more info| -|rescheduler|false|1|Delivers the kubernetes rescheduler component| -|cluster-autoscaler|false|1|Delivers the kubernetes cluster autoscaler component. See https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure for more info| +| Name of addon | Enabled by default? 
| How many containers | Description | +| --------------------------------------------------------------------- | ------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| tiller | true | 1 | Delivers the Helm server-side component: tiller. See https://github.com/kubernetes/helm for more info | +| kubernetes-dashboard | true | 1 | Delivers the Kubernetes dashboard component. See https://github.com/kubernetes/dashboard for more info | +| rescheduler | false | 1 | Delivers the Kubernetes rescheduler component | +| [cluster-autoscaler](../examples/addons/cluster-autoscaler/README.md) | false | 1 | Delivers the Kubernetes cluster autoscaler component. See https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/azure for more info | +| [nvidia-device-plugin](../examples/addons/nvidia-device-plugin/README.md) | true if using a Kubernetes cluster (v1.10+) with an N-series agent pool | 1 | Delivers the Kubernetes NVIDIA device plugin component. See https://github.com/NVIDIA/k8s-device-plugin for more info | +| container-monitoring | false | 1 | Delivers the Kubernetes container monitoring component | To give a bit more info on the `addons` property: We've tried to expose the basic bits of data that allow useful configuration of these cluster features. 
Here are some example usage patterns that will unpack what `addons` provide: @@ -110,6 +113,7 @@ More usefully, let's add some custom configuration to the above addons: "addons": [ { "name": "tiller", + "enabled": true, "containers": [ { "name": "tiller", @@ -123,6 +127,7 @@ More usefully, let's add some custom configuration to the above addons: }, { "name": "kubernetes-dashboard", + "enabled": true, "containers": [ { "name": "kubernetes-dashboard", @@ -135,6 +140,7 @@ More usefully, let's add some custom configuration to the above addons: }, { "name": "cluster-autoscaler", + "enabled": true, "containers": [ { "name": "cluster-autoscaler", @@ -167,6 +173,7 @@ Additionally above, we specified a custom docker image for tiller, let's say we Finally, the `addons.enabled` boolean property was omitted above; that's by design. If you specify a `containers` configuration, acs-engine assumes you're enabling the addon. The very first example above demonstrates a simple "enable this addon with default configuration" declaration. + #### kubeletConfig `kubeletConfig` declares runtime configuration for the kubelet running on all master and agent nodes. It is a generic key/value object, and a child property of `kubernetesConfig`. 
An example custom kubelet config: @@ -183,40 +190,41 @@ See [here](https://kubernetes.io/docs/reference/generated/kubelet/) for a refere Below is a list of kubelet options that acs-engine will configure by default: -|kubelet option|default value| -|---|---| -|"--cloud-config"|"/etc/kubernetes/azure.json"| -|"--cloud-provider"|"azure"| -|"--cluster-domain"|"cluster.local"| -|"--pod-infra-container-image"|"pause-amd64:*version*"| -|"--max-pods"|"30", or "110" if using kubenet --network-plugin (i.e., `"networkPlugin": "kubenet"`)| -|"--eviction-hard"|"memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%"| -|"--node-status-update-frequency"|"10s"| -|"--image-gc-high-threshold"|"85"| -|"--image-gc-low-threshold"|"850"| -|"--non-masquerade-cidr"|"10.0.0.0/8"| -|"--azure-container-registry-config"|"/etc/kubernetes/azure.json"| -|"--pod-max-pids"|"100" (need to activate the feature in --feature-gates=SupportPodPidsLimit=true)| -|"--image-pull-progress-deadline"|"30m"| -|"--feature-gates"|No default (can be a comma-separated list). 
On agent nodes `Accelerators=true` will be applied in the `--feature-gates` option for k8s versions before 1.11.0| - -Below is a list of kubelet options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces kubelet configuration, or because a static configuration is required to build a functional cluster: - -|kubelet option|default value| -|---|---| -|"--address"|"0.0.0.0"| -|"--allow-privileged"|"true"| -|"--pod-manifest-path"|"/etc/kubernetes/manifests"| -|"--network-plugin"|"cni"| -|"--node-labels"|(based on Azure node metadata)| -|"--cgroups-per-qos"|"true"| -|"--enforce-node-allocatable"|"pods"| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--register-node" (master nodes only)|"true"| -|"--register-with-taints" (master nodes only)|"node-role.kubernetes.io/master=true:NoSchedule"| -|"--keep-terminated-pod-volumes"|"false"| +| kubelet option | default value | +| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| "--cloud-config" | "/etc/kubernetes/azure.json" | +| "--cloud-provider" | "azure" | +| "--cluster-domain" | "cluster.local" | +| "--pod-infra-container-image" | "pause-amd64:_version_" | +| "--max-pods" | "30", or "110" if using kubenet --network-plugin (i.e., `"networkPlugin": "kubenet"`) | +| "--eviction-hard" | "memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%" | +| "--node-status-update-frequency" | "10s" | +| "--image-gc-high-threshold" | "85" | +| "--image-gc-low-threshold" | "850" | +| "--non-masquerade-cidr" | "10.0.0.0/8" | +| "--azure-container-registry-config" | "/etc/kubernetes/azure.json" | +| "--pod-max-pids" | "100" (need to activate the feature in --feature-gates=SupportPodPidsLimit=true) | +| "--image-pull-progress-deadline" | "30m" | +| "--feature-gates" | No default (can be a comma-separated list). 
On agent nodes `Accelerators=true` will be applied in the `--feature-gates` option for k8s versions before 1.11.0 | + +Below is a list of kubelet options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces kubelet configuration, or because a static configuration is required to build a functional cluster: + +| kubelet option | default value | +| -------------------------------------------- | ------------------------------------------------ | +| "--address" | "0.0.0.0" | +| "--allow-privileged" | "true" | +| "--pod-manifest-path" | "/etc/kubernetes/manifests" | +| "--network-plugin" | "cni" | +| "--node-labels" | (based on Azure node metadata) | +| "--cgroups-per-qos" | "true" | +| "--enforce-node-allocatable" | "pods" | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--register-node" (master nodes only) | "true" | +| "--register-with-taints" (master nodes only) | "node-role.kubernetes.io/master=true:NoSchedule" | +| "--keep-terminated-pod-volumes" | "false" | + #### controllerManagerConfig `controllerManagerConfig` declares runtime configuration for the kube-controller-manager daemon running on all master nodes. Like `kubeletConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. 
An example custom controller-manager config: @@ -236,35 +244,35 @@ See [here](https://kubernetes.io/docs/reference/generated/kube-controller-manage Below is a list of controller-manager options that acs-engine will configure by default: -|controller-manager option|default value| -|---|---| -|"--node-monitor-grace-period"|"40s"| -|"--pod-eviction-timeout"|"5m0s"| -|"--route-reconciliation-period"|"10s"| -|"--terminated-pod-gc-threshold"|"5000"| -|"--feature-gates"|No default (can be a comma-separated list)| - - -Below is a list of controller-manager options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: - -|controller-manager option|default value| -|---|---| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--allocate-node-cidrs"|"false"| -|"--cluster-cidr"|"10.240.0.0/12"| -|"--cluster-name"|*auto-generated using api model properties*| -|"--cloud-provider"|"azure"| -|"--cloud-config"|"/etc/kubernetes/azure.json"| -|"--root-ca-file"|"/etc/kubernetes/certs/ca.crt"| -|"--cluster-signing-cert-file"|"/etc/kubernetes/certs/ca.crt"| -|"--cluster-signing-key-file"|"/etc/kubernetes/certs/ca.key"| -|"--service-account-private-key-file"|"/etc/kubernetes/certs/apiserver.key"| -|"--leader-elect"|"true"| -|"--v"|"2"| -|"--profiling"|"false"| -|"--use-service-account-credentials"|"false" ("true" if kubernetesConfig.enableRbac is true)| +| controller-manager option | default value | +| ------------------------------- | ------------------------------------------ | +| "--node-monitor-grace-period" | "40s" | +| "--pod-eviction-timeout" | "5m0s" | +| "--route-reconciliation-period" | "10s" | +| "--terminated-pod-gc-threshold" | "5000" | +| "--feature-gates" | No default (can be a comma-separated list) | + +Below is a list of controller-manager options that are _not_ currently user-configurable, either 
because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: + +| controller-manager option | default value | +| ------------------------------------ | ------------------------------------------------------- | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--allocate-node-cidrs" | "false" | +| "--cluster-cidr" | "10.240.0.0/12" | +| "--cluster-name" | _auto-generated using api model properties_ | +| "--cloud-provider" | "azure" | +| "--cloud-config" | "/etc/kubernetes/azure.json" | +| "--root-ca-file" | "/etc/kubernetes/certs/ca.crt" | +| "--cluster-signing-cert-file" | "/etc/kubernetes/certs/ca.crt" | +| "--cluster-signing-key-file" | "/etc/kubernetes/certs/ca.key" | +| "--service-account-private-key-file" | "/etc/kubernetes/certs/apiserver.key" | +| "--leader-elect" | "true" | +| "--v" | "2" | +| "--profiling" | "false" | +| "--use-service-account-credentials" | "false" ("true" if kubernetesConfig.enableRbac is true) | + #### cloudControllerManagerConfig `cloudControllerManagerConfig` declares runtime configuration for the cloud-controller-manager daemon running on all master nodes in a Cloud Controller Manager configuration. Like `kubeletConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. 
An example custom cloud-controller-manager config: @@ -281,25 +289,25 @@ See [here](https://kubernetes.io/docs/reference/generated/cloud-controller-manag Below is a list of cloud-controller-manager options that acs-engine will configure by default: -|controller-manager option|default value| -|---|---| -|"--route-reconciliation-period"|"10s"| - +| controller-manager option | default value | +| ------------------------------- | ------------- | +| "--route-reconciliation-period" | "10s" | -Below is a list of cloud-controller-manager options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: +Below is a list of cloud-controller-manager options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces controller-manager configuration, or because a static configuration is required to build a functional cluster: -|controller-manager option|default value| -|---|---| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--allocate-node-cidrs"|"false"| -|"--cluster-cidr"|"10.240.0.0/12"| -|"--cluster-name"|*auto-generated using api model properties*| -|"--cloud-provider"|"azure"| -|"--cloud-config"|"/etc/kubernetes/azure.json"| -|"--leader-elect"|"true"| -|"--v"|"2"| +| controller-manager option | default value | +| ------------------------- | ------------------------------------------- | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--allocate-node-cidrs" | "false" | +| "--cluster-cidr" | "10.240.0.0/12" | +| "--cluster-name" | _auto-generated using api model properties_ | +| "--cloud-provider" | "azure" | +| "--cloud-config" | "/etc/kubernetes/azure.json" | +| "--leader-elect" | "true" | +| "--v" | "2" | + #### apiServerConfig `apiServerConfig` declares runtime configuration for the kube-apiserver daemon running on all 
master nodes. Like `kubeletConfig` and `controllerManagerConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. An example custom apiserver config: @@ -311,6 +319,7 @@ Below is a list of cloud-controller-manager options that are *not* currently use } } ``` + Or perhaps you want to customize/override the set of admission-control flags passed to the API Server by default, you can omit the options you don't want and specify only the ones you need as follows: ``` @@ -329,63 +338,63 @@ See [here](https://kubernetes.io/docs/reference/generated/kube-apiserver/) for a Below is a list of apiserver options that acs-engine will configure by default: -|apiserver option|default value| -|---|---| -|"--admission-control"|"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions prior to 1.9.0| -|"--enable-admission-plugins"`*`|"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" (Kubernetes versions 1.9.0 and later| -|"--authorization-mode"|"Node", "RBAC" (*the latter if enabledRbac is true*)| -|"--audit-log-maxage"|"30"| -|"--audit-log-maxbackup"|"10"| -|"--audit-log-maxsize"|"100"| -|"--feature-gates"|No default (can be a comma-separated list)| -|"--oidc-username-claim"|"oid" (*if has AADProfile*)| -|"--oidc-groups-claim"|"groups" (*if has AADProfile*)| -|"--oidc-client-id"|*calculated value that represents OID client ID* (*if has AADProfile*)| -|"--oidc-issuer-url"|*calculated value that represents OID issuer URL* (*if has AADProfile*)| +| apiserver option | default value | +| ------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| "--admission-control"           | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,AlwaysPullImages" (Kubernetes versions prior to 1.9.0) | +| "--enable-admission-plugins"`*` | "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages" (Kubernetes versions 1.9.0 and later) | +| "--authorization-mode"          | "Node", "RBAC" (_the latter if enableRbac is true_) | +| "--audit-log-maxage"            | "30" | +| "--audit-log-maxbackup"         | "10" | +| "--audit-log-maxsize"           | "100" | +| "--feature-gates"               | No default (can be a comma-separated list) | +| "--oidc-username-claim"         | "oid" (_if has AADProfile_) | +| "--oidc-groups-claim"           | "groups" (_if has AADProfile_) | +| "--oidc-client-id"              | _calculated value that represents OID client ID_ (_if has AADProfile_) | +| "--oidc-issuer-url"             | _calculated value that represents OID issuer URL_ (_if has AADProfile_) | `*` In Kubernetes versions 1.10.0 and later the `--admission-control` flag is deprecated and `--enable-admission-plugins` is used in its stead. 
- -Below is a list of apiserver options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces apiserver configuration, or because a static configuration is required to build a functional cluster: - -|apiserver option|default value| -|---|---| -|"--bind-address"|"0.0.0.0"| -|"--advertise-address"|*calculated value that represents listening URI for API server*| -|"--allow-privileged"|"true"| -|"--anonymous-auth"|"false| -|"--audit-log-path"|"/var/log/apiserver/audit.log"| -|"--insecure-port"|"8080"| -|"--secure-port"|"443"| -|"--service-account-lookup"|"true"| -|"--etcd-cafile"|"/etc/kubernetes/certs/ca.crt"| -|"--etcd-certfile"|"/etc/kubernetes/certs/etcdclient.crt"| -|"--etcd-keyfile"|"/etc/kubernetes/certs/etcdclient.key"| -|"--etcd-servers"|*calculated value that represents etcd servers*| -|"--profiling"|"false"| -|"--repair-malformed-updates"|"false"| -|"--tls-cert-file"|"/etc/kubernetes/certs/apiserver.crt"| -|"--tls-private-key-file"|"/etc/kubernetes/certs/apiserver.key"| -|"--client-ca-file"|"/etc/kubernetes/certs/ca.crt"| -|"--service-account-key-file"|"/etc/kubernetes/certs/apiserver.key"| -|"--kubelet-client-certificate"|"/etc/kubernetes/certs/client.crt"| -|"--kubelet-client-key"|"/etc/kubernetes/certs/client.key"| -|"--service-cluster-ip-range"|*see serviceCIDR*| -|"--storage-backend"|*calculated value that represents etcd version*| -|"--v"|"4"| -|"--experimental-encryption-provider-config"|"/etc/kubernetes/encryption-config.yaml" (*if enableDataEncryptionAtRest is true*)| -|"--experimental-encryption-provider-config"|"/etc/kubernetes/encryption-config.yaml" (*if enableEncryptionWithExternalKms is true*)| -|"--requestheader-client-ca-file"|"/etc/kubernetes/certs/proxy-ca.crt" (*if enableAggregatedAPIs is true*)| -|"--proxy-client-cert-file"|"/etc/kubernetes/certs/proxy.crt" (*if enableAggregatedAPIs is true*)| -|"--proxy-client-key-file"|"/etc/kubernetes/certs/proxy.key" (*if 
enableAggregatedAPIs is true*)| -|"--requestheader-allowed-names"|"" (*if enableAggregatedAPIs is true*)| -|"--requestheader-extra-headers-prefix"|"X-Remote-Extra-" (*if enableAggregatedAPIs is true*)| -|"--requestheader-group-headers"|"X-Remote-Group" (*if enableAggregatedAPIs is true*)| -|"--requestheader-username-headers"|"X-Remote-User" (*if enableAggregatedAPIs is true*)| -|"--cloud-provider"|"azure" (*unless useCloudControllerManager is true*)| -|"--cloud-config"|"/etc/kubernetes/azure.json" (*unless useCloudControllerManager is true*)| +Below is a list of apiserver options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces apiserver configuration, or because a static configuration is required to build a functional cluster: + +| apiserver option                            | default value                                                                            | +| ------------------------------------------- | ---------------------------------------------------------------------------------------- | +| "--bind-address"                            | "0.0.0.0"                                                                                | +| "--advertise-address"                       | _calculated value that represents listening URI for API server_                          | +| "--allow-privileged"                        | "true"                                                                                   | +| "--anonymous-auth"                          | "false"                                                                                  | +| "--audit-log-path"                          | "/var/log/apiserver/audit.log"                                                           | +| "--insecure-port"                           | "8080"                                                                                   | +| "--secure-port"                             | "443"                                                                                    | +| "--service-account-lookup"                  | "true"                                                                                   | +| "--etcd-cafile"                             | "/etc/kubernetes/certs/ca.crt"                                                           | +| "--etcd-certfile"                           | "/etc/kubernetes/certs/etcdclient.crt"                                                   | +| "--etcd-keyfile"                            | "/etc/kubernetes/certs/etcdclient.key"                                                   | +| "--etcd-servers"                            | _calculated value that represents etcd servers_                                          | +| "--profiling"                               | "false"                                                                                  | +| "--repair-malformed-updates"                | "false"                                                                                  | +| "--tls-cert-file"                           | "/etc/kubernetes/certs/apiserver.crt"                                                    | +| "--tls-private-key-file"                    | "/etc/kubernetes/certs/apiserver.key"                                                    | +| "--client-ca-file"                          | "/etc/kubernetes/certs/ca.crt"                                                           | +| "--service-account-key-file"                | "/etc/kubernetes/certs/apiserver.key"                                                    | +| "--kubelet-client-certificate"              | 
"/etc/kubernetes/certs/client.crt" | +| "--kubelet-client-key" | "/etc/kubernetes/certs/client.key" | +| "--service-cluster-ip-range" | _see serviceCIDR_ | +| "--storage-backend" | _calculated value that represents etcd version_ | +| "--v" | "4" | +| "--experimental-encryption-provider-config" | "/etc/kubernetes/encryption-config.yaml" (_if enableDataEncryptionAtRest is true_) | +| "--experimental-encryption-provider-config" | "/etc/kubernetes/encryption-config.yaml" (_if enableEncryptionWithExternalKms is true_) | +| "--requestheader-client-ca-file" | "/etc/kubernetes/certs/proxy-ca.crt" (_if enableAggregatedAPIs is true_) | +| "--proxy-client-cert-file" | "/etc/kubernetes/certs/proxy.crt" (_if enableAggregatedAPIs is true_) | +| "--proxy-client-key-file" | "/etc/kubernetes/certs/proxy.key" (_if enableAggregatedAPIs is true_) | +| "--requestheader-allowed-names" | "" (_if enableAggregatedAPIs is true_) | +| "--requestheader-extra-headers-prefix" | "X-Remote-Extra-" (_if enableAggregatedAPIs is true_) | +| "--requestheader-group-headers" | "X-Remote-Group" (_if enableAggregatedAPIs is true_) | +| "--requestheader-username-headers" | "X-Remote-User" (_if enableAggregatedAPIs is true_) | +| "--cloud-provider" | "azure" (_unless useCloudControllerManager is true_) | +| "--cloud-config" | "/etc/kubernetes/azure.json" (_unless useCloudControllerManager is true_) | + #### schedulerConfig `schedulerConfig` declares runtime configuration for the kube-scheduler daemon running on all master nodes. Like `kubeletConfig`, `controllerManagerConfig`, and `apiServerConfig` it is a generic key/value object, and a child property of `kubernetesConfig`. 
An example custom apiserver config: @@ -402,115 +411,119 @@ See [here](https://kubernetes.io/docs/reference/generated/kube-scheduler/) for a Below is a list of scheduler options that acs-engine will configure by default: -|kube-scheduler option|default value| -|---|---| -|"--v"|"2"| -|"--feature-gates"|No default (can be a comma-separated list)| +| kube-scheduler option | default value | +| --------------------- | ------------------------------------------ | +| "--v" | "2" | +| "--feature-gates" | No default (can be a comma-separated list) | +Below is a list of kube-scheduler options that are _not_ currently user-configurable, either because a higher order configuration vector is available that enforces kube-scheduler configuration, or because a static configuration is required to build a functional cluster: -Below is a list of kube-scheduler options that are *not* currently user-configurable, either because a higher order configuration vector is available that enforces kube-scheduler configuration, or because a static configuration is required to build a functional cluster: - -|kube-scheduler option|default value| -|---|---| -|"--kubeconfig"|"/var/lib/kubelet/kubeconfig"| -|"--leader-elect"|"true"| -|"--profiling"|"false"| +| kube-scheduler option | default value | +| --------------------- | ----------------------------- | +| "--kubeconfig" | "/var/lib/kubelet/kubeconfig" | +| "--leader-elect" | "true" | +| "--profiling" | "false" | We consider `kubeletConfig`, `controllerManagerConfig`, `apiServerConfig`, and `schedulerConfig` to be generic conveniences that add power/flexibility to cluster deployments. Their usage comes with no operational guarantees! They are manual tuning features that enable low-level configuration of a kubernetes cluster. + #### privateCluster `privateCluster` defines a cluster without public addresses assigned. It is a child property of `kubernetesConfig`. 
-|Name|Required|Description| -|---|---|---| -|enabled|no|Enable [Private Cluster](./kubernetes/features.md/#feat-private-cluster) (boolean - default == false) | -|jumpboxProfile|no|Configure and auto-provision a jumpbox to access your private cluster. `jumpboxProfile` is ignored if enabled is `false`. See `jumpboxProfile` below| +| Name | Required | Description | +| -------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | +| enabled | no | Enable [Private Cluster](./kubernetes/features.md/#feat-private-cluster) (boolean - default == false) | +| jumpboxProfile | no | Configure and auto-provision a jumpbox to access your private cluster. `jumpboxProfile` is ignored if enabled is `false`. See `jumpboxProfile` below | #### jumpboxProfile `jumpboxProfile` describes the settings for a jumpbox deployed via acs-engine to access a private cluster. It is a child property of `privateCluster`. -|Name|Required|Description| -|---|---|---| -|name|yes|This is the unique name for the jumpbox VM. Some resources deployed with the jumpbox are derived from this name| -|vmSize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/)| -|publicKey|yes|The public SSH key used for authenticating access to the jumpbox. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation)| -|osDiskSizeGB|no|Describes the OS Disk Size in GB. Defaults to `30`| -|storageProfile|no|Specifies the storage profile to use. Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks`| -|username|no|Describes the admin username to be used on the jumpbox. 
Defaults to `azureuser`| +| Name | Required | Description | +| -------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| name | yes | This is the unique name for the jumpbox VM. Some resources deployed with the jumpbox are derived from this name | +| vmSize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/) | +| publicKey | yes | The public SSH key used for authenticating access to the jumpbox. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation) | +| osDiskSizeGB | no | Describes the OS Disk Size in GB. Defaults to `30` | +| storageProfile | no | Specifies the storage profile to use. Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks` | +| username | no | Describes the admin username to be used on the jumpbox. Defaults to `azureuser` | ### masterProfile + `masterProfile` describes the settings for master configuration. -|Name|Required|Description| -|---|---|---| -|count|yes|Masters have count value of 1, 3, or 5 masters| -|dnsPrefix|yes|The dns prefix for the master FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet))| -|subjectAltNames|no|An array of fully qualified domain names using which a user can reach API server. These domains are added as Subject Alternative Names to the generated API server certificate. **NOTE**: These domains **will not** be automatically provisioned.| -|firstConsecutiveStaticIP|only required when vnetSubnetId specified|The IP address of the first master. 
IP Addresses will be assigned consecutively to additional master nodes| -|vmsize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores and 100GB of ephemeral disk space| -|osDiskSizeGB|no|Describes the OS Disk Size in GB| -|vnetSubnetId|no|Specifies the Id of an alternate VNET subnet. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet))| -|extensions|no|This is an array of extensions. This indicates that the extension be run on a single master. The name in the extensions array must exactly match the extension name in the extensionProfiles| -|vnetCidr|no|Specifies the VNET cidr when using a custom VNET ([bring your own VNET examples](../examples/vnet))| -|imageReference.name|no|The name of the Linux OS image. Needs to be used in conjunction with resourceGroup, below| -|imageReference.resourceGroup|no|Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above| -|distro|no|Select Master(s) Operating System (Linux only). Currently supported values are: `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined. Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json)| -|customFiles|no|The custom files to be provisioned to the master nodes. 
Defined as an array of json objects with each defined as `"source":"absolute-local-path", "dest":"absolute-path-on-masternodes"`.[See examples](../examples/customfiles) | +| Name | Required | Description | +| ---------------------------- | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| count | yes | Masters have count value of 1, 3, or 5 masters | +| dnsPrefix | yes | The dns prefix for the master FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet)) | +| subjectAltNames | no | An array of fully qualified domain names using which a user can reach API server. These domains are added as Subject Alternative Names to the generated API server certificate. **NOTE**: These domains **will not** be automatically provisioned. | +| firstConsecutiveStaticIP | only required when vnetSubnetId specified | The IP address of the first master. IP Addresses will be assigned consecutively to additional master nodes | +| vmsize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores and 100GB of ephemeral disk space | +| osDiskSizeGB | no | Describes the OS Disk Size in GB | +| vnetSubnetId | no | Specifies the Id of an alternate VNET subnet. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet)) | +| extensions | no | This is an array of extensions. 
This indicates that the extension be run on a single master. The name in the extensions array must exactly match the extension name in the extensionProfiles | +| vnetCidr | no | Specifies the VNET cidr when using a custom VNET ([bring your own VNET examples](../examples/vnet)) | +| imageReference.name | no | The name of the Linux OS image. Needs to be used in conjunction with resourceGroup, below | +| imageReference.resourceGroup | no | Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above | +| distro | no | Select Master(s) Operating System (Linux only). Currently supported values are: `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined. Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. [Example of CoreOS Master with CoreOS Agents](../examples/coreos/kubernetes-coreos.json) | +| customFiles | no | The custom files to be provisioned to the master nodes. Defined as an array of json objects with each defined as `"source":"absolute-local-path", "dest":"absolute-path-on-masternodes"`.[See examples](../examples/customfiles) | ### agentPoolProfiles + A cluster can have 0 to 12 agent pool profiles. Agent Pool Profiles are used for creating agents with different capabilities such as VMSizes, VMSS or Availability Set, Public/Private access, user-defined OS Images, [attached storage disks](../examples/disks-storageaccount), [attached managed disks](../examples/disks-managed), or [Windows](../examples/windows). -|Name|Required|Description| -|---|---|---| -|availabilityProfile|no|Supported values are `VirtualMachineScaleSets` (default, except for Kubernetes clusters before version 1.10) and `AvailabilitySet`.| -|count|yes|Describes the node count| -|scaleSetPriority|no|Supported values are `Regular` (default) and `Low`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. 
Enables the usage of [Low-priority VMs on Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-use-low-priority).| -|scaleSetEvictionPolicy|no|Supported values are `Delete` (default) and `Deallocate`. Only applies to clusters with availabilityProfile of `VirtualMachineScaleSets` and scaleSetPriority of `Low`.| -|diskSizesGB|no|Describes an array of up to 4 attached disk sizes. Valid disk size values are between 1 and 1024| -|dnsPrefix|Required if agents are to be exposed publically with a load balancer|The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools. Not supported for Kubernetes clusters| -|name|yes|This is the unique name for the agent pool profile. The resources of the agent pool profile are derived from this name| -|ports|only required if needed for exposing services publically|Describes an array of ports need for exposing publically. A tcp probe is configured for each port and only opens to an agent node if the agent node is listening on that port. A maximum of 150 ports may be specified. Not supported for Kubernetes clusters| -|storageProfile|no|Specifies the storage profile to use. Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks`| -|vmsize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores| -|osDiskSizeGB|no|Describes the OS Disk Size in GB| -|vnetSubnetId|no|Specifies the Id of an alternate VNET subnet. The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet))| -|imageReference.name|no|The name of a a Linux OS image. 
Needs to be used in conjunction with resourceGroup, below| -|imageReference.resourceGroup|no|Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above| -|osType|no|Specifies the agent pool's Operating System. Supported values are `Windows` and `Linux`. Defaults to `Linux`| -|distro|no|Specifies the agent pool's Linux distribution. Supported values are `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined, unless `osType` is defined as `Windows` (in which case `distro` is unused). Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. [Example of CoreOS Master with Windows and Linux (CoreOS and Ubuntu) Agents](../examples/coreos/kubernetes-coreos-hybrid.json) | -|acceleratedNetworkingEnabled|no|Use [Azure Accelerated Networking](https://azure.microsoft.com/en-us/blog/maximize-your-vm-s-performance-with-accelerated-networking-now-generally-available-for-both-windows-and-linux/) feature for agents (You must select a VM SKU that support Accelerated Networking)| +| Name | Required | Description | +| ---------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| availabilityProfile | no | Supported values are `VirtualMachineScaleSets` (default, except for Kubernetes clusters before version 1.10) and `AvailabilitySet`. 
| +| count | yes | Describes the node count | +| scaleSetPriority | no | Supported values are `Regular` (default) and `Low`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. Enables the usage of [Low-priority VMs on Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-use-low-priority). | +| scaleSetEvictionPolicy | no | Supported values are `Delete` (default) and `Deallocate`. Only applies to clusters with availabilityProfile of `VirtualMachineScaleSets` and scaleSetPriority of `Low`. | +| diskSizesGB | no | Describes an array of up to 4 attached disk sizes. Valid disk size values are between 1 and 1024 | +| dnsPrefix | Required if agents are to be exposed publically with a load balancer | The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools. Not supported for Kubernetes clusters | +| name | yes | This is the unique name for the agent pool profile. The resources of the agent pool profile are derived from this name | +| ports | only required if needed for exposing services publically | Describes an array of ports need for exposing publically. A tcp probe is configured for each port and only opens to an agent node if the agent node is listening on that port. A maximum of 150 ports may be specified. Not supported for Kubernetes clusters | +| storageProfile | no | Specifies the storage profile to use. Valid values are [ManagedDisks](../examples/disks-managed) or [StorageAccount](../examples/disks-storageaccount). Defaults to `ManagedDisks` | +| vmsize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores | +| osDiskSizeGB | no | Describes the OS Disk Size in GB | +| vnetSubnetId | no | Specifies the Id of an alternate VNET subnet. 
The subnet id must specify a valid VNET ID owned by the same subscription. ([bring your own VNET examples](../examples/vnet)) | +| imageReference.name | no | The name of a a Linux OS image. Needs to be used in conjunction with resourceGroup, below | +| imageReference.resourceGroup | no | Resource group that contains the Linux OS image. Needs to be used in conjunction with name, above | +| osType | no | Specifies the agent pool's Operating System. Supported values are `Windows` and `Linux`. Defaults to `Linux` | +| distro | no | Specifies the agent pool's Linux distribution. Supported values are `ubuntu` and `coreos` (CoreOS support is currently experimental). Defaults to `ubuntu` if undefined, unless `osType` is defined as `Windows` (in which case `distro` is unused). Currently supported OS and orchestrator configurations -- `ubuntu`: DCOS, Docker Swarm, Kubernetes; `RHEL`: OpenShift; `coreos`: Kubernetes. [Example of CoreOS Master with Windows and Linux (CoreOS and Ubuntu) Agents](../examples/coreos/kubernetes-coreos-hybrid.json) | +| acceleratedNetworkingEnabled | no | Use [Azure Accelerated Networking](https://azure.microsoft.com/en-us/blog/maximize-your-vm-s-performance-with-accelerated-networking-now-generally-available-for-both-windows-and-linux/) feature for agents (You must select a VM SKU that support Accelerated Networking) | ### linuxProfile `linuxProfile` provides the linux configuration for each linux node in the cluster -|Name|Required|Description| -|---|---|---| -|adminUsername|yes|Describes the username to be used on all linux clusters| -|ssh.publicKeys.keyData|yes|The public SSH key used for authenticating access to all Linux nodes in the cluster. 
Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation)| -|secrets|no|Specifies an array of key vaults to pull secrets from and what secrets to pull from each| -|customSearchDomain.name|no|describes the search domain to be used on all linux clusters| -|customSearchDomain.realmUser|no|describes the realm user with permissions to update dns registries on Windows Server DNS| -|customSearchDomain.realmPassword|no|describes the realm user password to update dns registries on Windows Server DNS| -|customNodesDNS.dnsServer|no|describes the IP address of the DNS Server| +| Name | Required | Description | +| -------------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| adminUsername | yes | Describes the username to be used on all linux clusters | +| ssh.publicKeys.keyData | yes | The public SSH key used for authenticating access to all Linux nodes in the cluster. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation) | +| secrets | no | Specifies an array of key vaults to pull secrets from and what secrets to pull from each | +| customSearchDomain.name | no | describes the search domain to be used on all linux clusters | +| customSearchDomain.realmUser | no | describes the realm user with permissions to update dns registries on Windows Server DNS | +| customSearchDomain.realmPassword | no | describes the realm user password to update dns registries on Windows Server DNS | +| customNodesDNS.dnsServer | no | describes the IP address of the DNS Server | #### secrets + `secrets` details which certificates to install on the masters and nodes in the cluster. A cluster can have a list of key vaults to install certs from. On linux boxes the certs are saved on under the directory "/var/lib/waagent/". 2 files are saved per certificate: -1. 
`{thumbprint}.crt` : this is the full cert chain saved in PEM format -2. `{thumbprint}.prv` : this is the private key saved in PEM format +1. `{thumbprint}.crt` : this is the full cert chain saved in PEM format +2. `{thumbprint}.prv` : this is the private key saved in PEM format + +| Name | Required | Description | +| -------------------------------- | -------- | ------------------------------------------------------------------- | +| sourceVault.id | yes | The azure resource manager id of the key vault to pull secrets from | +| vaultCertificates.certificateUrl | yes | Keyvault URL to this cert including the version | -|Name|Required|Description| -|---|---|---| -|sourceVault.id|yes|The azure resource manager id of the key vault to pull secrets from| -|vaultCertificates.certificateUrl|yes|Keyvault URL to this cert including the version| format for `sourceVault.id`, can be obtained in cli, or found in the portal: /subscriptions/{subscription-id}/resourceGroups/{resource-group}/providers/Microsoft.KeyVault/vaults/{keyvaultname} format for `vaultCertificates.certificateUrl`, can be obtained in cli, or found in the portal: @@ -518,90 +531,95 @@ https://{keyvaultname}.vault.azure.net:443/secrets/{secretName}/{version} ### servicePrincipalProfile -`servicePrincipalProfile` describes an Azure Service credentials to be used by the cluster for self-configuration. See [service principal](serviceprincipal.md) for more details on creation. - -|Name|Required|Description| -|---|---|---| -|clientId|yes, for Kubernetes clusters|describes the Azure client id. It is recommended to use a separate client ID per cluster| -|secret|yes, for Kubernetes clusters|describes the Azure client secret. It is recommended to use a separate client secret per client id| -|objectId|optional, for Kubernetes clusters|describes the Azure service principal object id. 
It is required if enableEncryptionWithExternalKms is true| -|keyvaultSecretRef.vaultId|no, for Kubernetes clusters|describes the vault id of the keyvault to retrieve the service principal secret from. See below for format.| -|keyvaultSecretRef.secretName|no, for Kubernetes clusters|describes the name of the service principal secret in keyvault| -|keyvaultSecretRef.version|no, for Kubernetes clusters|describes the version of the secret to use| +`servicePrincipalProfile` describes an Azure Service credentials to be used by the cluster for self-configuration. See [service principal](serviceprincipal.md) for more details on creation. +| Name | Required | Description | +| ---------------------------- | --------------------------------- | ----------------------------------------------------------------------------------------------------------- | +| clientId | yes, for Kubernetes clusters | describes the Azure client id. It is recommended to use a separate client ID per cluster | +| secret | yes, for Kubernetes clusters | describes the Azure client secret. It is recommended to use a separate client secret per client id | +| objectId | optional, for Kubernetes clusters | describes the Azure service principal object id. It is required if enableEncryptionWithExternalKms is true | +| keyvaultSecretRef.vaultId | no, for Kubernetes clusters | describes the vault id of the keyvault to retrieve the service principal secret from. See below for format. | +| keyvaultSecretRef.secretName | no, for Kubernetes clusters | describes the name of the service principal secret in keyvault | +| keyvaultSecretRef.version | no, for Kubernetes clusters | describes the version of the secret to use | format for `keyvaultSecretRef.vaultId`, can be obtained in cli, or found in the portal: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults/`. See [keyvault params](../examples/keyvault-params/README.md#service-principal-profile) for an example. 
## Cluster Defintions for apiVersion "2016-03-30" -Here are the cluster definitions for apiVersion "2016-03-30". This matches the api version of the Azure Container Service Engine. +Here are the cluster definitions for apiVersion "2016-03-30". This matches the api version of the Azure Container Service Engine. ### apiVersion -|Name|Required|Description| -|---|---|---| -|apiVersion|yes|The version of the template. For "2016-03-30" the value is "2016-03-30"| +| Name | Required | Description | +| ---------- | -------- | ----------------------------------------------------------------------- | +| apiVersion | yes | The version of the template. For "2016-03-30" the value is "2016-03-30" | ### orchestratorProfile + `orchestratorProfile` describes the orchestrator settings. -|Name|Required|Description| -|---|---|---| -|orchestratorType|yes|Specifies the orchestrator type for the cluster| +| Name | Required | Description | +| ---------------- | -------- | ----------------------------------------------- | +| orchestratorType | yes | Specifies the orchestrator type for the cluster | Here are the valid values for the orchestrator types: -1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). -2. `Swarm` - this represents the [Swarm orchestrator](swarm.md). -3. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). -4. `Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). -5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md) +1. `DCOS` - this represents the [DC/OS orchestrator](dcos.md). +2. `Swarm` - this represents the [Swarm orchestrator](swarm.md). +3. `Kubernetes` - this represents the [Kubernetes orchestrator](kubernetes.md). +4. `Swarm Mode` - this represents the [Swarm Mode orchestrator](swarmmode.md). +5. `OpenShift` - this represents the [OpenShift orchestrator](openshift.md) ### masterProfile + `masterProfile` describes the settings for master configuration. 
-|Name|Required|Description| -|---|---|---| -|count|yes|Masters have count value of 1, 3, or 5 masters| -|dnsPrefix|yes|The dns prefix for the masters FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet))| +| Name | Required | Description | +| --------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| count | yes | Masters have count value of 1, 3, or 5 masters | +| dnsPrefix | yes | The dns prefix for the masters FQDN. The master FQDN is used for SSH or commandline access. This must be a unique name. ([bring your own VNET examples](../examples/vnet)) | ### agentPoolProfiles + For apiVersion "2016-03-30", a cluster may have only 1 agent pool profiles. -|Name|Required|Description| -|---|---|---| -|count|yes|Describes the node count| -|dnsPrefix|required if agents are to be exposed publically with a load balancer|The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools| -|name|yes|The unique name for the agent pool profile. The resources of the agent pool profile are derived from this name| -|vmsize|yes|Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). 
These are restricted to machines with at least 2 cores| +| Name | Required | Description | +| --------- | -------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| count | yes | Describes the node count | +| dnsPrefix | required if agents are to be exposed publically with a load balancer | The dns prefix that forms the FQDN to access the loadbalancer for this agent pool. This must be a unique name among all agent pools | +| name | yes | The unique name for the agent pool profile. The resources of the agent pool profile are derived from this name | +| vmsize | yes | Describes a valid [Azure VM Sizes](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-sizes/). These are restricted to machines with at least 2 cores | ### linuxProfile `linuxProfile` provides the linux configuration for each linux node in the cluster -|Name|Required|Description| -|---|---|---| -|adminUsername|yes|Describes the username to be used on all linux clusters| -|ssh.publicKeys[0].keyData|yes|The public SSH key used for authenticating access to all Linux nodes in the cluster. Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation)| +| Name | Required | Description | +| ------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| adminUsername | yes | Describes the username to be used on all linux clusters | +| ssh.publicKeys[0].keyData | yes | The public SSH key used for authenticating access to all Linux nodes in the cluster. 
Here are instructions for [generating a public/private key pair](ssh.md#ssh-key-generation) | + ### aadProfile `aadProfile` provides [Azure Active Directory integration](kubernetes.aad.md) configuration for the cluster, currently only available for Kubernetes orchestrator. -|Name|Required|Description| -|---|---|---| -|clientAppID|yes|Describes the client AAD application ID| -|serverAppID|yes|Describes the server AAD application ID| -|adminGroupID|no|Describes the AAD Group Object ID that will be assigned the cluster-admin RBAC role| -|tenantID|no|Describes the AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription| +| Name | Required | Description | +| ------------ | -------- | --------------------------------------------------------------------------------------------------------------------------- | +| clientAppID | yes | Describes the client AAD application ID | +| serverAppID | yes | Describes the server AAD application ID | +| adminGroupID | no | Describes the AAD Group Object ID that will be assigned the cluster-admin RBAC role | +| tenantID | no | Describes the AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription | + ### extensionProfiles -A cluster can have 0 - N extensions in extension profiles. Extension profiles allow a user to easily add pre-packaged functionality into a cluster. An example would be configuring a monitoring solution on your cluster. You can think of extensions like a marketplace for acs clusters. - -|Name|Required|Description| -|---|---|---| -|name|yes|The name of the extension. This has to exactly match the name of a folder under the extensions folder| -|version|yes|The version of the extension. This has to exactly match the name of the folder under the extension name folder| -|extensionParameters|optional|Extension parameters may be required by extensions. 
The format of the parameters is also extension dependant| -|rootURL|optional|URL to the root location of extensions. The rootURL must have an extensions child folder that follows the extensions convention. The rootURL is mainly used for testing purposes| + +A cluster can have 0 - N extensions in extension profiles. Extension profiles allow a user to easily add pre-packaged functionality into a cluster. An example would be configuring a monitoring solution on your cluster. You can think of extensions like a marketplace for acs clusters. + +| Name | Required | Description | +| ------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| name | yes | The name of the extension. This has to exactly match the name of a folder under the extensions folder | +| version | yes | The version of the extension. This has to exactly match the name of the folder under the extension name folder | +| extensionParameters | optional | Extension parameters may be required by extensions. The format of the parameters is also extension dependant | +| rootURL | optional | URL to the root location of extensions. The rootURL must have an extensions child folder that follows the extensions convention. The rootURL is mainly used for testing purposes | You can find more information, as well as a list of extensions on the [extensions documentation](extensions.md). diff --git a/docs/kubernetes.md b/docs/kubernetes.md index 6d8bc7b6b6..954055aaa4 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -22,4 +22,4 @@ Here are recommended links to learn more about Kubernetes: 1. [Kubernetes Bootcamp](https://kubernetesbootcamp.github.io/kubernetes-bootcamp/index.html) - shows you how to deploy, scale, update, and debug containerized applications. 2. 
[Kubernetes Userguide](http://kubernetes.io/docs/user-guide/) - provides information on running programs in an existing Kubernetes cluster. -3. [Kubernetes Examples](https://github.com/kubernetes/kubernetes/tree/master/examples) - provides a number of examples on how to run real applications with Kubernetes. +3. [Kubernetes Examples](https://github.com/kubernetes/examples) - provides a number of examples on how to run real applications with Kubernetes. diff --git a/docs/kubernetes/features.md b/docs/kubernetes/features.md index fdc5a619ad..554fd28297 100644 --- a/docs/kubernetes/features.md +++ b/docs/kubernetes/features.md @@ -2,7 +2,7 @@ |Feature|Status|API Version|Example|Description| |---|---|---|---|---| -|Managed Disks|Beta|`vlabs`|[kubernetes-vmas.json](../../examples/disks-managed/kubernetes-vmss.json)|[Description](#feat-managed-disks)| +|Managed Disks|Beta|`vlabs`|[kubernetes-vmas.json](../../examples/disks-managed/kubernetes-vmas.json)|[Description](#feat-managed-disks)| |Calico Network Policy|Alpha|`vlabs`|[kubernetes-calico.json](../../examples/networkpolicy/kubernetes-calico.json)|[Description](#feat-calico)| |Cilium Network Policy|Alpha|`vlabs`|[kubernetes-cilium.json](../../examples/networkpolicy/kubernetes-cilium.json)|[Description](#feat-cilium)| |Custom VNET|Beta|`vlabs`|[kubernetesvnet-azure-cni.json](../../examples/vnet/kubernetesvnet-azure-cni.json)|[Description](#feat-custom-vnet)| @@ -370,4 +370,4 @@ To get `objectId` of the service principal: ```console az ad sp list --spn -``` \ No newline at end of file +``` diff --git a/docs/kubernetes/troubleshooting.md b/docs/kubernetes/troubleshooting.md index d09a5e08d2..f0b582966b 100644 --- a/docs/kubernetes/troubleshooting.md +++ b/docs/kubernetes/troubleshooting.md @@ -17,7 +17,7 @@ CSE stands for CustomScriptExtension, and is just a way of expressing: "a script To summarize, the way that acs-engine implements Kubernetes on Azure is a collection of (1) Azure VM configuration + (2) 
shell script execution. Both are implemented as a single operational unit, and when #2 fails, we consider the entire VM provisioning operation to be a failure; more importantly, if only one VM in the cluster deployment fails, we consider the entire cluster operation to be a failure. -### How To Debug CSE errors +### How To Debug CSE errors (Linux) In order to troubleshoot a cluster that failed in the above way(s), we need to grab the CSE logs from the host VM itself. @@ -57,6 +57,51 @@ If after following the above you are still unable to troubleshoot your deploymen 3. The content of `/var/log/azure/cluster-provision.log` and `/var/log/cloud-init-output.log` +### How To Debug CSE Errors (Windows) + +There are two symptoms where you may need to debug Custom Script Extension errors on Windows: + +- VMExtensionProvisioningError or VMExtensionProvisioningTimeout +- `kubectl node` doesn't list the Windows node(s) + +To get more logs, you need to connect to the Windows nodes using Remote Desktop. Since the nodes are on a private IP range, you will need to use SSH local port forwarding from a master node. + +1. Get the IP of the Windows node with `az vm list` and `az vm show` + + ``` + $ az vm list --resource-group group1 -o table + Name ResourceGroup Location + ------------------------ --------------- ---------- + 29442k8s9000 group1 westus2 + 29442k8s9001 group1 westus2 + k8s-linuxpool-29442807-0 group1 westus2 + k8s-linuxpool-29442807-1 group1 westus2 + k8s-master-29442807-0 group1 westus2 + + $ az vm show -g group1 -n 29442k8s9000 --show-details --query 'privateIps' + "10.240.0.4" + ``` + +2. Forward a local port to the Windows port 3389, such as `ssh -L 5500:10.240.0.4:3389 ..cloudapp.azure.com` +3. 
Run `mstsc.exe /v:localhost:5500` + +Once connected, check the following logs for errors: + + - `c:\Azure\CustomDataSetupScript.log` + + +## Windows kubelet & CNI errors + +If the node is not showing up in `kubectl get node` or fails to schedule pods, check for failures from the kubelet and CNI logs. + +Follow the same steps [above](#how-to-debug-cse-errors-windows) to connect to Remote Desktop to the node, then look for errors in these logs: + + - `c:\k\kubelet.log` + - `c:\k\kubelet.err.log` + - `c:\k\azure-vnet*.log` + + + # Misconfigured Service Principal If your Service Principal is misconfigured, none of the Kubernetes components will come up in a healthy manner. diff --git a/docs/kubernetes/windows.md b/docs/kubernetes/windows.md index 5d3a38b13b..82ca710c98 100644 --- a/docs/kubernetes/windows.md +++ b/docs/kubernetes/windows.md @@ -302,6 +302,10 @@ TODO Windows support is still in active development with many changes each week. Read on for more info on known per-version issues and troubleshooting if you run into problems. +### Finding logs + +To connect to a Windows node using Remote Desktop and get logs, please read over this topic in the main [troubleshooting](troubleshooting.md#how-to-debug-cse-errors-windows) page first. + ### Checking versions Please be sure to include this info with any Windows bug reports. @@ -313,12 +317,6 @@ Kubernetes - “kernel version” - Also note the IP Address for the next step, but you don't need to share it -Windows config -Connect to the Windows node with remote desktop. This is easiest forwarding a port through SSH from your Kubernetes management endpoint. - -1. `ssh -L 5500::3389 user@masterFQDN` -2. Once connected, run `mstsc.exe /v:localhost:5500` to connect. Log in with the username & password you set for the Windows agents. - The Azure CNI plugin version and configuration is stored in `C:\k\azurecni\netconf\10-azure.conflist`. 
Get - mode - dns.Nameservers diff --git a/examples/addons/cluster-autoscaler/README.md b/examples/addons/cluster-autoscaler/README.md index 5827da4197..507eaf905b 100644 --- a/examples/addons/cluster-autoscaler/README.md +++ b/examples/addons/cluster-autoscaler/README.md @@ -2,8 +2,8 @@ [Cluster Autoscaler](https://github.com/kubernetes/autoscaler) is a tool that automatically adjusts the size of the Kubernetes cluster when: -* there are pods that failed to run in the cluster due to insufficient resources. -* some nodes in the cluster are so underutilized, for an extended period of time, that they can be deleted and their pods will be easily placed on some other, existing nodes. +- there are pods that failed to run in the cluster due to insufficient resources. +- some nodes in the cluster are so underutilized, for an extended period of time, that they can be deleted and their pods will be easily placed on some other, existing nodes. This is the Kubernetes Cluster Autoscaler add-on for Virtual Machine Scale Sets. Add this add-on to your json file as shown below to automatically enable cluster autoscaler in your new Kubernetes cluster. 
@@ -85,4 +85,4 @@ Follow the README at https://github.com/kubernetes/autoscaler/tree/master/cluste ## Supported Orchestrators -* Kubernetes +- Kubernetes diff --git a/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json b/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json index 9884671690..ebd3964690 100644 --- a/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json +++ b/examples/e2e-tests/kubernetes/kubernetes-config/addons-disabled.json @@ -20,6 +20,14 @@ { "name": "rescheduler", "enabled": false + }, + { + "name": "nvidia-device-plugin", + "enabled": false + }, + { + "name": "container-monitoring", + "enabled": false } ] } diff --git a/examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json b/examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json new file mode 100644 index 0000000000..b07d50d23b --- /dev/null +++ b/examples/e2e-tests/kubernetes/kubernetes-config/addons-enabled.json @@ -0,0 +1,64 @@ +{ + "apiVersion": "vlabs", + "properties": { + "orchestratorProfile": { + "orchestratorType": "Kubernetes", + "orchestratorRelease": "1.10", + "kubernetesConfig": { + "addons": [ + { + "name": "tiller", + "enabled": true + }, + { + "name": "aci-connector", + "enabled": true + }, + { + "name": "kubernetes-dashboard", + "enabled": true + }, + { + "name": "rescheduler", + "enabled": true + }, + { + "name": "nvidia-device-plugin", + "enabled": true + }, + { + "name": "container-monitoring", + "enabled": true + } + ] + } + }, + "masterProfile": { + "count": 1, + "dnsPrefix": "", + "vmSize": "Standard_D2_v2" + }, + "agentPoolProfiles": [ + { + "name": "linuxpool1", + "count": 3, + "vmSize": "Standard_D2_v2" + } + ], + "linuxProfile": { + "adminUsername": "azureuser", + "ssh": { + "publicKeys": [ + { + "keyData": "" + } + ] + } + }, + "servicePrincipalProfile": { + "clientId": "", + "secret": "" + }, + "certificateProfile": {} + } +} \ No newline at end of file diff --git 
a/examples/e2e-tests/kubernetes/kubernetes-config/containerd.json b/examples/e2e-tests/kubernetes/kubernetes-config/containerd.json deleted file mode 100644 index facad5df83..0000000000 --- a/examples/e2e-tests/kubernetes/kubernetes-config/containerd.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "apiVersion": "vlabs", - "properties": { - "orchestratorProfile": { - "orchestratorType": "Kubernetes", - "orchestratorRelease": "1.10", - "kubernetesConfig": { - "networkPlugin": "flannel", - "containerRuntime": "containerd" - } - }, - "masterProfile": { - "count": 1, - "dnsPrefix": "", - "vmSize": "Standard_D2_v2" - }, - "agentPoolProfiles": [ - { - "name": "agentpool1", - "count": 3, - "vmSize": "Standard_D2_v2", - "availabilityProfile": "AvailabilitySet" - } - ], - "linuxProfile": { - "adminUsername": "azureuser", - "ssh": { - "publicKeys": [ - { - "keyData": "" - } - ] - } - }, - "servicePrincipalProfile": { - "clientId": "", - "secret": "" - } - } -} diff --git a/examples/networkpolicy/kubernetes-cilium.json b/examples/networkpolicy/kubernetes-cilium.json index f4cd723392..194568c772 100644 --- a/examples/networkpolicy/kubernetes-cilium.json +++ b/examples/networkpolicy/kubernetes-cilium.json @@ -3,7 +3,7 @@ "properties": { "orchestratorProfile": { "orchestratorType": "Kubernetes", - "orchestratorRelease": "1.9", + "orchestratorRelease": "1.10", "kubernetesConfig": { "networkPolicy": "cilium", "addons": [ diff --git a/examples/windows/README.md b/examples/windows/README.md index ad9f3d0c11..d171cd003e 100644 --- a/examples/windows/README.md +++ b/examples/windows/README.md @@ -1,4 +1,4 @@ -# Microsoft Azure Container Service Engine - Builds Docker Enabled Clusters +# Microsoft Azure Container Service Engine ## Overview @@ -8,4 +8,15 @@ These cluster definition examples demonstrate how to create customized Docker En * [Kubernetes Windows Walkthrough](../../docs/kubernetes/windows.md) - shows how to create a hybrid Kubernetes Windows enabled Docker cluster on Azure. 
* [Building Kubernetes Windows binaries](../../docs/kubernetes-build-win-binaries.md) - shows how to build kubernetes windows binaries for use in a Windows Kubernetes cluster. -* [Hybrid Swarm Mode with Linux and Windows nodes](../../docs/swarmmode-hybrid.md) - shows how to create a hybrid Swarm Mode cluster on Azure. \ No newline at end of file +* [Hybrid Swarm Mode with Linux and Windows nodes](../../docs/swarmmode-hybrid.md) - shows how to create a hybrid Swarm Mode cluster on Azure. + + +## Sample Deployments + +### Kubernetes + +- kubernetes.json - this is the simplest case for a 2-node Windows Kubernetes cluster +- kubernetes-custom-image.json - example using an existing Azure Managed Disk for Windows nodes. For example if you need a prerelease OS version, you can build a VHD, upload it and use this sample. +- kubernetes-hybrid.json - example with both Windows & Linux nodes in the same cluster +- kubernetes-wincni.json - example using kubenet plugin on Linux nodes and WinCNI on Windows +- kubernetes-windows-version.json - example of how to build a cluster with a specific Windows patch version \ No newline at end of file diff --git a/examples/e2e-tests/kubernetes/kubernetes-config/flannel.json b/examples/windows/kubernetes-wincni.json similarity index 65% rename from examples/e2e-tests/kubernetes/kubernetes-config/flannel.json rename to examples/windows/kubernetes-wincni.json index d015d34c53..874dc59a5a 100644 --- a/examples/e2e-tests/kubernetes/kubernetes-config/flannel.json +++ b/examples/windows/kubernetes-wincni.json @@ -4,7 +4,7 @@ "orchestratorProfile": { "orchestratorType": "Kubernetes", "kubernetesConfig": { - "networkPlugin":"flannel", + "networkPlugin": "kubenet" } }, "masterProfile": { @@ -14,12 +14,17 @@ }, "agentPoolProfiles": [ { - "name": "linuxpool1", - "count": 3, + "name": "agentwin", + "count": 2, "vmSize": "Standard_D2_v2", - "availabilityProfile": "AvailabilitySet" + "availabilityProfile": "AvailabilitySet", + "osType": "Windows" } ], + 
"windowsProfile": { + "adminUsername": "azureuser", + "adminPassword": "replacepassword1234$" + }, "linuxProfile": { "adminUsername": "azureuser", "ssh": { @@ -33,7 +38,6 @@ "servicePrincipalProfile": { "clientId": "", "secret": "" - }, - "certificateProfile": {} + } } } diff --git a/glide.lock b/glide.lock index 43144c197c..77f9fc7b84 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 6fa6f282ecddc833638528e7142991e81fbe33987514e2da1ece181a6470c568 -updated: 2018-06-08T11:35:55.674334-07:00 +hash: f9bc1ee5fe4bbff90c78d6851f3d91b617a0166b92137c3913d879c3cdb65876 +updated: 2018-07-02T13:50:41.754767835-07:00 imports: - name: github.com/alexcesaro/statsd version: 7fea3f0d2fab1ad973e641e51dba45443a311a90 @@ -129,6 +129,8 @@ imports: - matchers/support/goraph/node - matchers/support/goraph/util - types +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/rjtsdl/conform version: 7b97338c67692998bc62b2572e96c8ff09803dc3 - name: github.com/satori/go.uuid @@ -166,6 +168,10 @@ imports: - http2/hpack - idna - lex/httplex +- name: golang.org/x/sync + version: 1d60e4601c6fd243af51cc01ddf169918a5407ca + subpackages: + - errgroup - name: golang.org/x/sys version: 95c6576299259db960f6c5b9b69ea52422860fce subpackages: diff --git a/glide.yaml b/glide.yaml index e1a9351c7c..1186d0cca2 100644 --- a/glide.yaml +++ b/glide.yaml @@ -58,16 +58,19 @@ import: version: 0841753fc26e934b715ca7a83dced5bcb721245a - package: k8s.io/client-go version: ~7.0.0 -# This is the same version client-go is pinned to - package: k8s.io/api version: 73d903622b7391f3312dcbac6483fed484e185f8 - package: github.com/Jeffail/gabs - version: 1.0 + version: "1.0" - package: github.com/rjtsdl/conform version: 1.2.1 - package: github.com/etgryphon/stringUp +- package: golang.org/x/sync + subpackages: + - errgroup +- package: github.com/pkg/errors + version: ~0.8.0 testImport: -# glide isn't 
able to mutually reconcile pinned versions of these deps - package: github.com/onsi/gomega - package: github.com/onsi/ginkgo - package: github.com/kelseyhightower/envconfig @@ -77,4 +80,6 @@ testImport: version: ~7.0.0 - package: k8s.io/api version: 73d903622b7391f3312dcbac6483fed484e185f8 -- package: github.com/influxdata/influxdb/client/v2 +- package: github.com/influxdata/influxdb + subpackages: + - client/v2 diff --git a/parts/k8s/addons/azure-cni-networkmonitor.yaml b/parts/k8s/addons/azure-cni-networkmonitor.yaml index 7bc3127e61..390044fcc7 100644 --- a/parts/k8s/addons/azure-cni-networkmonitor.yaml +++ b/parts/k8s/addons/azure-cni-networkmonitor.yaml @@ -20,6 +20,8 @@ spec: tolerations: - key: CriticalAddonsOnly operator: Exists + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: azure-cnms image: diff --git a/parts/k8s/manifests/kubernetesmaster-audit-policy.yaml b/parts/k8s/addons/kubernetesmaster-audit-policy.yaml similarity index 100% rename from parts/k8s/manifests/kubernetesmaster-audit-policy.yaml rename to parts/k8s/addons/kubernetesmaster-audit-policy.yaml diff --git a/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml new file mode 100644 index 0000000000..300cec8b96 --- /dev/null +++ b/parts/k8s/addons/kubernetesmasteraddons-azure-npm-daemonset.yaml @@ -0,0 +1,103 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: "EnsureExists" +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: "EnsureExists" +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - get + - list + - 
watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: "EnsureExists" +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: "EnsureExists" +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: CriticalAddonsOnly + operator: Exists + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - name: azure-npm + image: containernetworking/azure-npm:v0.0.4 + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + - name: log + mountPath: /var/log + priorityClassName: azure-npm-priority-class + hostNetwork: true + volumes: + - name: log + hostPath: + path: /var/log + type: Directory + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + serviceAccountName: azure-npm \ No newline at end of file diff --git a/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml index 85d3bd4d5a..c53052ec8d 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-calico-daemonset.yaml @@ -210,6 +210,8 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: hostNetwork: true + nodeSelector: + beta.kubernetes.io/os: linux tolerations: - key: CriticalAddonsOnly operator: Exists diff --git 
a/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml index 2b4fa22b5b..b9c2f956e8 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-flannel-daemonset.yaml @@ -29,7 +29,7 @@ data: } net-conf.json: | { - "Network": "10.244.0.0/16", + "Network": "", "Backend": { "Type": "vxlan" } @@ -56,6 +56,7 @@ spec: hostNetwork: true nodeSelector: beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux tolerations: - key: node-role.kubernetes.io/master operator: Equal diff --git a/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml b/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml index fc62947df0..64d5c688a4 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-kube-rescheduler-deployment.yaml @@ -19,6 +19,8 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - image: name: rescheduler diff --git a/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml index 6ed2906463..a67b6c92f0 100644 --- a/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-nvidia-device-plugin-daemonset.yaml @@ -1,26 +1,42 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: labels: + k8s-app: nvidia-device-plugin kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile name: nvidia-device-plugin namespace: kube-system spec: + selector: + matchLabels: + k8s-app: nvidia-device-plugin + updateStrategy: + type: RollingUpdate template: metadata: - # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler - # 
reserves resources for critical add-on pods so that they can be rescheduled after - # a failure. This annotation works in tandem with the toleration below. annotations: scheduler.alpha.kubernetes.io/critical-pod: "" labels: - name: nvidia-device-plugin-ds + k8s-app: nvidia-device-plugin spec: + priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: accelerator + operator: In + values: + - nvidia tolerations: - # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. - # This, along with the annotation above marks this pod as a critical add-on. - key: CriticalAddonsOnly operator: Exists + - key: nvidia.com/gpu + effect: NoSchedule + operator: Equal + value: "true" containers: - image: name: nvidia-device-plugin-ctr @@ -37,3 +53,4 @@ spec: path: /var/lib/kubelet/device-plugins nodeSelector: beta.kubernetes.io/os: linux + accelerator: nvidia diff --git a/parts/k8s/addons/omsagent-daemonset.yaml b/parts/k8s/addons/kubernetesmasteraddons-omsagent-daemonset.yaml similarity index 99% rename from parts/k8s/addons/omsagent-daemonset.yaml rename to parts/k8s/addons/kubernetesmasteraddons-omsagent-daemonset.yaml index 48e94e1f0a..5ba51eb52c 100644 --- a/parts/k8s/addons/omsagent-daemonset.yaml +++ b/parts/k8s/addons/kubernetesmasteraddons-omsagent-daemonset.yaml @@ -222,6 +222,8 @@ metadata: value: my_aks_cluster - name: DISABLE_KUBE_SYSTEM_LOG_COLLECTION value: "true" + - name: ISTEST + value: true securityContext: privileged: true ports: diff --git a/parts/k8s/kubernetesagentcustomdata.yml b/parts/k8s/kubernetesagentcustomdata.yml index d5b6eece1c..28f69f2084 100644 --- a/parts/k8s/kubernetesagentcustomdata.yml +++ b/parts/k8s/kubernetesagentcustomdata.yml @@ -8,14 +8,15 @@ write_files: content: !!binary | {{WrapAsVariable "provisionSource"}} -{{if not .IsCoreOS}} +{{if .KubernetesConfig.RequiresDocker}} + {{if not .IsCoreOS}} - 
path: "/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf" permissions: "0644" owner: "root" content: | [Service] MountFlags=shared -{{end}} + {{end}} - path: "/etc/systemd/system/docker.service.d/exec_start.conf" permissions: "0644" owner: "root" @@ -34,15 +35,16 @@ write_files: "log-opts": { "max-size": "50m", "max-file": "5" - }{{if IsNVIDIADevicePluginEnabled}} + }{{if IsNSeriesSKU .}}{{if IsNVIDIADevicePluginEnabled}} ,"default-runtime": "nvidia", "runtimes": { "nvidia": { "path": "/usr/bin/nvidia-container-runtime", "runtimeArgs": [] } - }{{end}} + }{{end}}{{end}} } +{{end}} - path: "/etc/kubernetes/certs/ca.crt" permissions: "0644" @@ -139,7 +141,10 @@ AGENT_ARTIFACTS_CONFIG_PLACEHOLDER content: | #!/bin/bash /usr/bin/mkdir -p /etc/kubernetes/manifests + + {{if .KubernetesConfig.RequiresDocker}} usermod -aG docker {{WrapAsVariable "username"}} + {{end}} systemctl enable rpcbind systemctl enable rpc-statd diff --git a/parts/k8s/kubernetesagentresourcesvmas.t b/parts/k8s/kubernetesagentresourcesvmas.t index f2977f339e..ee939fbf1d 100644 --- a/parts/k8s/kubernetesagentresourcesvmas.t +++ b/parts/k8s/kubernetesagentresourcesvmas.t @@ -324,4 +324,26 @@ {{end}} } } + }, + { + "type": "Microsoft.Compute/virtualMachines/extensions", + "name": "[concat(variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')), '/computeAksLinuxBilling')]", + "apiVersion": "[variables('apiVersionDefault')]", + "copy": { + "count": "[sub(variables('{{.Name}}Count'), variables('{{.Name}}Offset'))]", + "name": "vmLoopNode" + }, + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Compute/virtualMachines/', variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')))]" + ], + "properties": { + "publisher": "Microsoft.AKS", + "type": {{if IsHostedMaster}}"Compute.AKS.Linux.Billing"{{else}}"Compute.AKS-Engine.Linux.Billing"{{end}}, + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": 
{ + } + } } + diff --git a/parts/k8s/kubernetesagentresourcesvmss.t b/parts/k8s/kubernetesagentresourcesvmss.t index 0977fd1255..d9264d2437 100644 --- a/parts/k8s/kubernetesagentresourcesvmss.t +++ b/parts/k8s/kubernetesagentresourcesvmss.t @@ -145,6 +145,17 @@ "commandToExecute": "[concat(variables('provisionScriptParametersCommon'),' /usr/bin/nohup /bin/bash -c \"/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1\"')]" } } + }, + { + "name": "[concat(variables('{{.Name}}VMNamePrefix'), '-computeAksLinuxBilling')]", + "location": "[variables('location')]", + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Linux.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": {} + } } {{if UseManagedIdentity}} ,{ diff --git a/parts/k8s/kubernetesbase.t b/parts/k8s/kubernetesbase.t index e387be32c6..dd00475992 100644 --- a/parts/k8s/kubernetesbase.t +++ b/parts/k8s/kubernetesbase.t @@ -41,6 +41,11 @@ {{range $index, $agent := .AgentPoolProfiles}} "{{.Name}}Index": {{$index}}, {{template "k8s/kubernetesagentvars.t" .}} + {{if IsNSeriesSKU .}} + {{if IsNVIDIADevicePluginEnabled}} + "registerWithGpuTaints": "nvidia.com/gpu=true:NoSchedule", + {{end}} + {{end}} {{if .IsStorageAccount}} {{if .HasDisks}} "{{.Name}}DataAccountName": "[concat(variables('storageAccountBaseName'), 'data{{$index}}')]", @@ -51,6 +56,9 @@ {{template "k8s/kubernetesmastervars.t" .}} }, "resources": [ + {{if IsOpenShift}} + {{template "openshift/infraresources.t" .}} + {{end}} {{ range $index, $element := .AgentPoolProfiles}} {{if $index}}, {{end}} {{if .IsWindows}} @@ -184,4 +192,4 @@ {{end}} } -} \ No newline at end of file +} diff --git a/parts/k8s/kubernetescustomscript.sh b/parts/k8s/kubernetescustomscript.sh index 442a1f7c14..660cc0b826 100644 --- a/parts/k8s/kubernetescustomscript.sh +++ b/parts/k8s/kubernetescustomscript.sh @@ -24,6 +24,8 @@ ERR_K8S_RUNNING_TIMEOUT=30 # Timeout waiting for k8s 
cluster to be healthy ERR_K8S_DOWNLOAD_TIMEOUT=31 # Timeout waiting for Kubernetes download(s) ERR_KUBECTL_NOT_FOUND=32 # kubectl client binary not found on local disk ERR_CNI_DOWNLOAD_TIMEOUT=41 # Timeout waiting for CNI download(s) +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 # Timeout waiting for https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 # Failed to add repo pkg file ERR_OUTBOUND_CONN_FAIL=50 # Unable to establish outbound connection ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 # Unable to configure custom search domains ERR_APT_DAILY_TIMEOUT=98 # Timeout waiting for apt daily updates @@ -55,7 +57,7 @@ else fi function testOutboundConnection() { - retrycmd_if_failure 120 1 20 nc -v 8.8.8.8 53 || retrycmd_if_failure 120 1 20 nc -v 8.8.4.4 53 || exit $ERR_OUTBOUND_CONN_FAIL + retrycmd_if_failure 20 1 3 nc -v 8.8.8.8 53 || retrycmd_if_failure 20 1 3 nc -v 8.8.4.4 53 || exit $ERR_OUTBOUND_CONN_FAIL } function waitForCloudInit() { @@ -154,13 +156,15 @@ function installEtcd() { } function installDeps() { + retrycmd_if_failure_no_stats 20 1 5 curl -fsSL https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL echo `date`,`hostname`, apt-get_update_begin>>/opt/m - apt_get_update || exit $ERR_APT_INSTALL_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT echo `date`,`hostname`, apt-get_update_end>>/opt/m # make sure walinuxagent doesn't get updated in the middle of running this script retrycmd_if_failure 20 5 30 apt-mark hold walinuxagent || exit $ERR_HOLD_WALINUXAGENT # See https://github.com/kubernetes/kubernetes/blob/master/build/debian-hyperkube-base/Dockerfile#L25-L44 - apt_get_install 20 30 300 apt-transport-https ca-certificates iptables iproute2 ebtables socat util-linux mount ethtool 
init-system-helpers nfs-common ceph-common conntrack glusterfs-client ipset jq cgroup-lite git pigz xz-utils || exit $ERR_APT_INSTALL_TIMEOUT + apt_get_install 20 30 300 apt-transport-https ca-certificates iptables iproute2 ebtables socat util-linux mount ethtool init-system-helpers nfs-common ceph-common conntrack glusterfs-client ipset jq cgroup-lite git pigz xz-utils blobfuse fuse || exit $ERR_APT_INSTALL_TIMEOUT systemctlEnableAndStart rpcbind systemctlEnableAndStart rpc-statd } @@ -337,6 +341,7 @@ function installContainerd() { retrycmd_get_tarball 60 5 "$CONTAINERD_TGZ_TMP" "$CONTAINERD_DOWNLOAD_URL" tar -xzf "$CONTAINERD_TGZ_TMP" -C / rm -f "$CONTAINERD_TGZ_TMP" + sed -i '/\[Service\]/a ExecStartPost=\/sbin\/iptables -P FORWARD ACCEPT' /etc/systemd/system/containerd.service echo "Successfully installed cri-containerd..." if [[ "$CONTAINER_RUNTIME" == "clear-containers" ]] || [[ "$CONTAINER_RUNTIME" == "containerd" ]]; then @@ -367,9 +372,9 @@ function ensureKubelet() { function extractHyperkube(){ TMP_DIR=$(mktemp -d) - retrycmd_if_failure 100 1 30 curl -sSL -o /usr/local/bin/img "https://github.com/genuinetools/img/releases/download/v0.4.6/img-linux-amd64" + retrycmd_if_failure 100 1 30 curl -sSL -o /usr/local/bin/img "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.4.6" chmod +x /usr/local/bin/img - retrycmd_if_failure 100 1 60 img pull $HYPERKUBE_URL || $ERR_K8S_DOWNLOAD_TIMEOUT + retrycmd_if_failure 75 1 60 img pull $HYPERKUBE_URL || exit $ERR_K8S_DOWNLOAD_TIMEOUT path=$(find /tmp/img -name "hyperkube") if [[ $OS == $COREOS_OS_NAME ]]; then @@ -385,11 +390,11 @@ function extractHyperkube(){ } function ensureJournal(){ - systemctlEnableAndStart systemd-journald echo "Storage=persistent" >> /etc/systemd/journald.conf echo "SystemMaxUse=1G" >> /etc/systemd/journald.conf echo "RuntimeMaxUse=1G" >> /etc/systemd/journald.conf echo "ForwardToSyslog=no" >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald } function 
ensurePodSecurityPolicy() { @@ -511,9 +516,14 @@ if [ -f $CUSTOM_SEARCH_DOMAIN_SCRIPT ]; then fi installDeps -installDocker + +if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installDocker + ensureDocker +fi + configureK8s -ensureDocker + configNetworkPlugin if [[ ! -z "${MASTER_NODE}" ]]; then diff --git a/parts/k8s/kubernetesmastercustomdata.yml b/parts/k8s/kubernetesmastercustomdata.yml index d3155a9ca9..44b4d0285a 100644 --- a/parts/k8s/kubernetesmastercustomdata.yml +++ b/parts/k8s/kubernetesmastercustomdata.yml @@ -14,6 +14,7 @@ write_files: content: !!binary | {{WrapAsVariable "provisionSource"}} +{{if .OrchestratorProfile.KubernetesConfig.RequiresDocker}} {{if not .MasterProfile.IsCoreOS}} - path: "/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf" permissions: "0644" @@ -46,6 +47,7 @@ write_files: "max-file": "5" } } +{{end}} - path: "/etc/kubernetes/certs/ca.crt" permissions: "0644" @@ -261,12 +263,16 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER # If Calico Policy enabled then update Cluster Cidr sed -i "s||{{WrapAsVariable "kubeClusterCidr"}}|g" "/etc/kubernetes/addons/calico-daemonset.yaml" {{end}} +{{if eq .OrchestratorProfile.KubernetesConfig.NetworkPlugin "flannel"}} + # If Flannel is enabled then update Cluster Cidr + sed -i "s||{{WrapAsVariable "kubeClusterCidr"}}|g" "/etc/kubernetes/addons/flannel-daemonset.yaml" +{{end}} {{if eq .OrchestratorProfile.KubernetesConfig.NetworkPolicy "cilium"}} # If Cilium Policy enabled then update the etcd certs and address sed -i "s||{{WrapAsVerbatim "variables('masterEtcdClientURLs')[copyIndex(variables('masterOffset'))]"}}|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" - sed -i "s||\"$(base64 -w 0 /etc/kubernetes/certs/ca.crt)\"|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" - sed -i "s||\"$(base64 -w 0 /etc/kubernetes/certs/etcdclient.key)\"|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" - sed -i "s||\"$(base64 -w 0 /etc/kubernetes/certs/etcdclient.crt)\"|g" 
"/etc/kubernetes/addons/cilium-daemonset.yaml" + sed -i "s||$(base64 -w 0 /etc/kubernetes/certs/ca.crt)|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" + sed -i "s||$(base64 -w 0 /etc/kubernetes/certs/etcdclient.key)|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" + sed -i "s||$(base64 -w 0 /etc/kubernetes/certs/etcdclient.crt)|g" "/etc/kubernetes/addons/cilium-daemonset.yaml" {{end}} {{if UseCloudControllerManager }} sed -i "s||{{WrapAsVariable "kubernetesCcmImageSpec"}}|g" "/etc/kubernetes/manifests/cloud-controller-manager.yaml" @@ -288,15 +294,15 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER sed -i "s||{{WrapAsVariable "searchDomainRealmPassword"}}|g" "/opt/azure/containers/setup-custom-search-domains.sh" {{end}} {{if .OrchestratorProfile.IsContainerMonitoringEnabled}} - sed -i "s||{{WrapAsVariable "omsAgentVersion"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "dockerProviderVersion"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesContainerMonitoringSpec"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "workspaceGuid"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "workspaceKey"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesOMSAgentCPURequests"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryRequests"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesOMSAgentCPULimit"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" - sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryLimit"}}|g" "/etc/kubernetes/addons/kubernetesmasteraddons-omsagent-daemonset.yaml" + sed -i 
"s||{{WrapAsVariable "omsAgentVersion"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentDockerProviderVersion"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentImage"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentWorkspaceGuid"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "omsAgentWorkspaceKey"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentCPURequests"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryRequests"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentCPULimit"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" + sed -i "s||{{WrapAsVariable "kubernetesOMSAgentMemoryLimit"}}|g" "/etc/kubernetes/addons/omsagent-daemonset.yaml" {{end}} - path: "/opt/azure/containers/provision.sh" @@ -376,7 +382,10 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER systemctl restart etcd-member retrycmd_if_failure 5 5 10 curl --retry 5 --retry-delay 10 --retry-max-time 10 --max-time 60 http://127.0.0.1:2379/v2/machines mkdir -p /etc/kubernetes/manifests + + {{if .OrchestratorProfile.KubernetesConfig.RequiresDocker}} usermod -aG docker {{WrapAsVariable "username"}} + {{end}} {{if EnableAggregatedAPIs}} sudo bash /etc/kubernetes/generate-proxy-certs.sh diff --git a/parts/k8s/kubernetesmasterresources.t b/parts/k8s/kubernetesmasterresources.t index 8588639701..9c8aedb63c 100644 --- a/parts/k8s/kubernetesmasterresources.t +++ b/parts/k8s/kubernetesmasterresources.t @@ -1,162 +1,3 @@ -{{if IsOpenShift}} - { - "type": "Microsoft.Network/networkSecurityGroups", - "apiVersion": "[variables('apiVersionDefault')]", - "location": "[variables('location')]", - "name": "[variables('routerNSGName')]", - "properties": { - "securityRules": [ - { - "name": 
"allow_http", - "properties": { - "access": "Allow", - "description": "Allow http traffic to infra nodes", - "destinationAddressPrefix": "*", - "destinationPortRange": "80", - "direction": "Inbound", - "priority": 110, - "protocol": "Tcp", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } - }, - { - "name": "allow_https", - "properties": { - "access": "Allow", - "description": "Allow https traffic to infra nodes", - "destinationAddressPrefix": "*", - "destinationPortRange": "443", - "direction": "Inbound", - "priority": 111, - "protocol": "Tcp", - "sourceAddressPrefix": "*", - "sourcePortRange": "*" - } - } - ] - } - }, - { - "name": "[variables('routerIPName')]", - "type": "Microsoft.Network/publicIPAddresses", - "apiVersion": "2017-08-01", - "location": "[variables('location')]", - "properties": { - "publicIPAllocationMethod": "Static", - "dnsSettings": { - "domainNameLabel": "[concat(variables('masterFqdnPrefix'), '-router')]" - } - }, - "sku": { - "name": "Basic" - } - }, - { - "name": "[variables('routerLBName')]", - "type": "Microsoft.Network/loadBalancers", - "apiVersion": "2017-10-01", - "location": "[variables('location')]", - "dependsOn": [ - "[concat('Microsoft.Network/publicIPAddresses/', variables('routerIPName'))]" - ], - "properties": { - "frontendIPConfigurations": [ - { - "name": "frontend", - "properties": { - "privateIPAllocationMethod": "Dynamic", - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('routerIPName'))]" - } - } - } - ], - "backendAddressPools": [ - { - "name": "backend" - } - ], - "loadBalancingRules": [ - { - "name": "port-80", - "properties": { - "frontendIPConfiguration": { - "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" - }, - "frontendPort": 80, - "backendPort": 80, - "enableFloatingIP": false, - "idleTimeoutInMinutes": 4, - "protocol": "Tcp", - "loadDistribution": "Default", - "backendAddressPool": { - "id": "[concat(variables('routerLBID'), 
'/backendAddressPools/backend')]" - }, - "probe": { - "id": "[concat(variables('routerLBID'), '/probes/port-80')]" - } - } - }, - { - "name": "port-443", - "properties": { - "frontendIPConfiguration": { - "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" - }, - "frontendPort": 443, - "backendPort": 443, - "enableFloatingIP": false, - "idleTimeoutInMinutes": 4, - "protocol": "Tcp", - "loadDistribution": "Default", - "backendAddressPool": { - "id": "[concat(variables('routerLBID'), '/backendAddressPools/backend')]" - }, - "probe": { - "id": "[concat(variables('routerLBID'), '/probes/port-443')]" - } - } - } - ], - "probes": [ - { - "name": "port-80", - "properties": { - "protocol": "Tcp", - "port": 80, - "intervalInSeconds": 5, - "numberOfProbes": 2 - } - }, - { - "name": "port-443", - "properties": { - "protocol": "Tcp", - "port": 443, - "intervalInSeconds": 5, - "numberOfProbes": 2 - } - } - ], - "inboundNatRules": [], - "outboundNatRules": [], - "inboundNatPools": [] - }, - "sku": { - "name": "Basic" - } - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('apiVersionStorage')]", - "name": "[concat(variables('storageAccountBaseName'), 'registry')]", - "location": "[variables('location')]", - "properties": { - "accountType": "Standard_LRS" - } - }, -{{end}} {{if .MasterProfile.IsManagedDisks}} { "apiVersion": "[variables('apiVersionStorageManagedDisks')]", @@ -781,6 +622,15 @@ }, {{end}} {{if EnableEncryptionWithExternalKms}} + { + "type": "Microsoft.Storage/storageAccounts", + "name": "[variables('clusterKeyVaultName')]", + "apiVersion": "[variables('apiVersionStorage')]", + "location": "[variables('location')]", + "properties": { + "accountType": "Standard_LRS" + } + }, { "type": "Microsoft.KeyVault/vaults", "name": "[variables('clusterKeyVaultName')]", @@ -1054,4 +904,25 @@ {{end}} } } + }, + { + "type": "Microsoft.Compute/virtualMachines/extensions", + "name": "[concat(variables('masterVMNamePrefix'), 
copyIndex(variables('masterOffset')), '/computeAksLinuxBilling')]", + "apiVersion": "[variables('apiVersionDefault')]", + "copy": { + "count": "[sub(variables('masterCount'), variables('masterOffset'))]", + "name": "vmLoopNode" + }, + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Compute/virtualMachines/', variables('masterVMNamePrefix'), copyIndex(variables('masterOffset')))]" + ], + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Linux.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": { + } + } }{{WriteLinkedTemplatesForExtensions}} diff --git a/parts/k8s/kubernetesmastervars.t b/parts/k8s/kubernetesmastervars.t index e24b60105f..b1f4c1aa71 100644 --- a/parts/k8s/kubernetesmastervars.t +++ b/parts/k8s/kubernetesmastervars.t @@ -486,6 +486,9 @@ {{end}} {{if EnableEncryptionWithExternalKms}} ,"apiVersionKeyVault": "2016-10-01", - "clusterKeyVaultName": "[take(concat(resourceGroup().location, '-' , uniqueString(concat(variables('masterFqdnPrefix'),'-',resourceGroup().location))), 20)]", + {{if not .HasStorageAccountDisks}} + "apiVersionStorage": "2015-06-15", + {{end}} + "clusterKeyVaultName": "[take(concat('kv', tolower(uniqueString(concat(variables('masterFqdnPrefix'),variables('location'),variables('nameSuffix'))))), 22)]", "clusterKeyVaultSku" : "[parameters('clusterKeyVaultSku')]" {{end}} diff --git a/parts/k8s/kubernetesprovisionsource.sh b/parts/k8s/kubernetesprovisionsource.sh index 58083ab71e..e722acdb3a 100644 --- a/parts/k8s/kubernetesprovisionsource.sh +++ b/parts/k8s/kubernetesprovisionsource.sh @@ -58,6 +58,7 @@ apt_get_update() { apt_update_output=/tmp/apt-get-update.out for i in $(seq 1 $retries); do timeout 30 dpkg --configure -a + timeout 30 apt-get -f -y install timeout 120 apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$" [ $? 
-ne 0 ] && cat $apt_update_output && break || \ cat $apt_update_output diff --git a/parts/k8s/kuberneteswinagentresourcesvmas.t b/parts/k8s/kuberneteswinagentresourcesvmas.t index c7036f7619..4ae8bbeb97 100644 --- a/parts/k8s/kuberneteswinagentresourcesvmas.t +++ b/parts/k8s/kuberneteswinagentresourcesvmas.t @@ -274,4 +274,25 @@ "commandToExecute": "[concat('powershell.exe -ExecutionPolicy Unrestricted -command \"', '$arguments = ', variables('singleQuote'),'-MasterIP ',variables('kubernetesAPIServerIP'),' -KubeDnsServiceIp ',variables('kubeDnsServiceIp'),' -MasterFQDNPrefix ',variables('masterFqdnPrefix'),' -Location ',variables('location'),' -AgentKey ',variables('clientPrivateKey'),' -AADClientId ',variables('servicePrincipalClientId'),' -AADClientSecret ',variables('servicePrincipalClientSecret'),variables('singleQuote'), ' ; ', variables('windowsCustomScriptSuffix'), '\" > %SYSTEMDRIVE%\\AzureData\\CustomDataSetupScript.log 2>&1')]" } } + }, + { + "type": "Microsoft.Compute/virtualMachines/extensions", + "name": "[concat(variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')), '/computeAksLinuxBilling')]", + "apiVersion": "[variables('apiVersionDefault')]", + "copy": { + "count": "[sub(variables('{{.Name}}Count'), variables('{{.Name}}Offset'))]", + "name": "vmLoopNode" + }, + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Compute/virtualMachines/', variables('{{.Name}}VMNamePrefix'), copyIndex(variables('{{.Name}}Offset')))]" + ], + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Windows.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": { + } + } } \ No newline at end of file diff --git a/parts/k8s/kuberneteswinagentresourcesvmss.t b/parts/k8s/kuberneteswinagentresourcesvmss.t index 80ff7077dd..6be6d9e0c1 100644 --- a/parts/k8s/kuberneteswinagentresourcesvmss.t +++ b/parts/k8s/kuberneteswinagentresourcesvmss.t @@ -113,6 +113,17 @@ 
"commandToExecute": "[concat('powershell.exe -ExecutionPolicy Unrestricted -command \"', '$arguments = ', variables('singleQuote'),'-MasterIP ',variables('kubernetesAPIServerIP'),' -KubeDnsServiceIp ',variables('kubeDnsServiceIp'),' -MasterFQDNPrefix ',variables('masterFqdnPrefix'),' -Location ',variables('location'),' -AgentKey ',variables('clientPrivateKey'),' -AADClientId ',variables('servicePrincipalClientId'),' -AADClientSecret ',variables('servicePrincipalClientSecret'),variables('singleQuote'), ' ; ', variables('windowsCustomScriptSuffix'), '\" > %SYSTEMDRIVE%\\AzureData\\CustomDataSetupScript.log 2>&1')]" } } + }, + { + "name": "[concat(variables('{{.Name}}VMNamePrefix'), '-computeAksLinuxBilling')]", + "location": "[variables('location')]", + "properties": { + "publisher": "Microsoft.AKS", + "type": "Compute.AKS-Engine.Windows.Billing", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": {} + } } {{if UseManagedIdentity}} ,{ diff --git a/parts/k8s/kuberneteswindowssetup.ps1 b/parts/k8s/kuberneteswindowssetup.ps1 index 704d4da9c6..6acf4c4e6e 100644 --- a/parts/k8s/kuberneteswindowssetup.ps1 +++ b/parts/k8s/kuberneteswindowssetup.ps1 @@ -346,6 +346,7 @@ c:\k\kubelet.exe --hostname-override=`$env:computername --pod-infra-container-im `$global:KubeBinariesVersion = "$global:KubeBinariesVersion" `$global:CNIPath = "$global:CNIPath" `$global:NetworkMode = "$global:NetworkMode" +`$global:ExternalNetwork = "ext" `$global:CNIConfig = "$global:CNIConfig" `$global:HNSModule = "$global:HNSModule" `$global:VolumePluginDir = "$global:VolumePluginDir" @@ -360,6 +361,38 @@ Write-Host "NetworkPlugin azure, starting kubelet." # Turn off Firewall to enable pods to talk to service endpoints. (Kubelet should eventually do this) netsh advfirewall set allprofiles state off +# startup the service + +# Find if the primary external switch network exists. If not create one. 
+# This is done only once in the lifetime of the node +`$hnsNetwork = Get-HnsNetwork | ? Name -EQ `$global:ExternalNetwork +if (!`$hnsNetwork) +{ + Write-Host "Creating a new hns Network" + ipmo `$global:HNSModule + # Fixme : use a smallest range possible, that will not collide with any pod space + New-HNSNetwork -Type `$global:NetworkMode -AddressPrefix "192.168.255.0/30" -Gateway "192.168.255.1" -Name `$global:ExternalNetwork -Verbose +} + +# Find if network created by CNI exists, if yes, remove it +# This is required to keep the network non-persistent behavior +# Going forward, this would be done by HNS automatically during restart of the node + +`$hnsNetwork = Get-HnsNetwork | ? Name -EQ $global:KubeNetwork +if (`$hnsNetwork) +{ + # Cleanup all containers + docker ps -q | foreach {docker rm `$_ -f} + + Write-Host "Cleaning up old HNS network found" + Remove-HnsNetwork `$hnsNetwork + Start-Sleep 10 + `$cnijson = "$global:KubeDir" + "\azure-vnet*" + remove-item `$cnijson -ErrorAction SilentlyContinue +} + +# Restart Kubeproxy, which would wait, until the network is created +Restart-Service Kubeproxy $KubeletCommandLine @@ -510,11 +543,12 @@ catch `$env:KUBE_NETWORK = "$global:KubeNetwork" `$global:NetworkMode = "$global:NetworkMode" `$global:HNSModule = "$global:HNSModule" -`$hnsNetwork = Get-HnsNetwork | ? Type -EQ `$global:NetworkMode.ToLower() +`$hnsNetwork = Get-HnsNetwork | ? Name -EQ $global:KubeNetwork while (!`$hnsNetwork) { + Write-Host "Waiting for Network [$global:KubeNetwork] to be created . . ." Start-Sleep 10 - `$hnsNetwork = Get-HnsNetwork | ? Type -EQ `$global:NetworkMode.ToLower() + `$hnsNetwork = Get-HnsNetwork | ? 
Name -EQ $global:KubeNetwork } # @@ -624,6 +658,9 @@ try Write-Log "Set Internet Explorer" Set-Explorer + Write-Log "Start preProvisioning script" + PREPROVISION_EXTENSION + Write-Log "Setup Complete, reboot computer" Restart-Computer } diff --git a/parts/openshift/infraresources.t b/parts/openshift/infraresources.t new file mode 100644 index 0000000000..30d1c39e80 --- /dev/null +++ b/parts/openshift/infraresources.t @@ -0,0 +1,157 @@ + { + "type": "Microsoft.Network/networkSecurityGroups", + "apiVersion": "[variables('apiVersionDefault')]", + "location": "[variables('location')]", + "name": "[variables('routerNSGName')]", + "properties": { + "securityRules": [ + { + "name": "allow_http", + "properties": { + "access": "Allow", + "description": "Allow http traffic to infra nodes", + "destinationAddressPrefix": "*", + "destinationPortRange": "80", + "direction": "Inbound", + "priority": 110, + "protocol": "Tcp", + "sourceAddressPrefix": "*", + "sourcePortRange": "*" + } + }, + { + "name": "allow_https", + "properties": { + "access": "Allow", + "description": "Allow https traffic to infra nodes", + "destinationAddressPrefix": "*", + "destinationPortRange": "443", + "direction": "Inbound", + "priority": 111, + "protocol": "Tcp", + "sourceAddressPrefix": "*", + "sourcePortRange": "*" + } + } + ] + } + }, + { + "name": "[variables('routerIPName')]", + "type": "Microsoft.Network/publicIPAddresses", + "apiVersion": "2017-08-01", + "location": "[variables('location')]", + "properties": { + "publicIPAllocationMethod": "Static", + "dnsSettings": { + "domainNameLabel": "[concat(variables('masterFqdnPrefix'), '-router')]" + } + }, + "sku": { + "name": "Basic" + } + }, + { + "name": "[variables('routerLBName')]", + "type": "Microsoft.Network/loadBalancers", + "apiVersion": "2017-10-01", + "location": "[variables('location')]", + "dependsOn": [ + "[concat('Microsoft.Network/publicIPAddresses/', variables('routerIPName'))]" + ], + "properties": { + "frontendIPConfigurations": [ + 
{ + "name": "frontend", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('routerIPName'))]" + } + } + } + ], + "backendAddressPools": [ + { + "name": "backend" + } + ], + "loadBalancingRules": [ + { + "name": "port-80", + "properties": { + "frontendIPConfiguration": { + "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" + }, + "frontendPort": 80, + "backendPort": 80, + "enableFloatingIP": false, + "idleTimeoutInMinutes": 4, + "protocol": "Tcp", + "loadDistribution": "Default", + "backendAddressPool": { + "id": "[concat(variables('routerLBID'), '/backendAddressPools/backend')]" + }, + "probe": { + "id": "[concat(variables('routerLBID'), '/probes/port-80')]" + } + } + }, + { + "name": "port-443", + "properties": { + "frontendIPConfiguration": { + "id": "[concat(variables('routerLBID'), '/frontendIPConfigurations/frontend')]" + }, + "frontendPort": 443, + "backendPort": 443, + "enableFloatingIP": false, + "idleTimeoutInMinutes": 4, + "protocol": "Tcp", + "loadDistribution": "Default", + "backendAddressPool": { + "id": "[concat(variables('routerLBID'), '/backendAddressPools/backend')]" + }, + "probe": { + "id": "[concat(variables('routerLBID'), '/probes/port-443')]" + } + } + } + ], + "probes": [ + { + "name": "port-80", + "properties": { + "protocol": "Tcp", + "port": 80, + "intervalInSeconds": 5, + "numberOfProbes": 2 + } + }, + { + "name": "port-443", + "properties": { + "protocol": "Tcp", + "port": 443, + "intervalInSeconds": 5, + "numberOfProbes": 2 + } + } + ], + "inboundNatRules": [], + "outboundNatRules": [], + "inboundNatPools": [] + }, + "sku": { + "name": "Basic" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('apiVersionStorage')]", + "name": "[concat(variables('storageAccountBaseName'), 'registry')]", + "location": "[variables('location')]", + "properties": { + "accountType": 
"Standard_LRS" + } + }, \ No newline at end of file diff --git a/parts/openshift/release-3.9/openshiftmasterscript.sh b/parts/openshift/release-3.9/openshiftmasterscript.sh index cacf656459..3b9d2d6db4 100644 --- a/parts/openshift/release-3.9/openshiftmasterscript.sh +++ b/parts/openshift/release-3.9/openshiftmasterscript.sh @@ -31,13 +31,6 @@ else COCKPIT_VERSION="latest" fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. -systemctl stop docker.service -restorecon -R /var/lib/docker -systemctl start docker.service - echo "BOOTSTRAP_CONFIG_NAME=node-config-master" >>/etc/sysconfig/${SERVICE_TYPE}-node for dst in tcp,2379 tcp,2380 tcp,8443 tcp,8444 tcp,8053 udp,8053 tcp,9090; do @@ -54,9 +47,14 @@ sed -i -e "s#--loglevel=2#--loglevel=4#" /etc/sysconfig/${SERVICE_TYPE}-master-c rm -rf /etc/etcd/* /etc/origin/master/* /etc/origin/node/* +MASTER_OREG_URL="$IMAGE_PREFIX/$IMAGE_TYPE" +if [[ -f /etc/origin/oreg_url ]]; then + MASTER_OREG_URL=$(cat /etc/origin/oreg_url) +fi + oc adm create-bootstrap-policy-file --filename=/etc/origin/master/policy.json -( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) +( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz) cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt update-ca-trust @@ -78,7 +76,7 @@ set -x ### # retrieve the public ip via dns for the router public ip and sub it in for the routingConfig.subdomain ### -routerLBHost="{{.RouterLBHostname}}" +routerLBHost={{ .RouterLBHostname | shellQuote }} routerLBIP=$(dig +short $routerLBHost) # NOTE: The version of openshift-ansible for origin defaults the ansible var @@ -101,6 +99,7 @@ for i in /etc/origin/master/master-config.yaml /tmp/bootstrapconfigs/* /tmp/ansi sed -i "s|COCKPIT_VERSION|${COCKPIT_VERSION}|g; 
s|COCKPIT_BASENAME|${COCKPIT_BASENAME}|g; s|COCKPIT_PREFIX|${COCKPIT_PREFIX}|g;" $i sed -i "s|VERSION|${VERSION}|g; s|SHORT_VER|${VERSION%.*}|g; s|SERVICE_TYPE|${SERVICE_TYPE}|g; s|IMAGE_TYPE|${IMAGE_TYPE}|g" $i sed -i "s|HOSTNAME|${HOSTNAME}|g;" $i + sed -i "s|MASTER_OREG_URL|${MASTER_OREG_URL}|g" $i done # note: ${SERVICE_TYPE}-node crash loops until master is up @@ -132,7 +131,7 @@ metadata: provisioner: kubernetes.io/azure-disk parameters: skuName: Premium_LRS - location: {{ .Location }} + location: {{ .Location | quote }} kind: managed EOF diff --git a/parts/openshift/release-3.9/openshiftnodescript.sh b/parts/openshift/release-3.9/openshiftnodescript.sh index af105ed702..a70a8308e9 100644 --- a/parts/openshift/release-3.9/openshiftnodescript.sh +++ b/parts/openshift/release-3.9/openshiftnodescript.sh @@ -7,13 +7,6 @@ if [ -f "/etc/sysconfig/atomic-openshift-node" ]; then SERVICE_TYPE=atomic-openshift fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. 
-systemctl stop docker.service -restorecon -R /var/lib/docker -systemctl start docker.service - {{if eq .Role "infra"}} echo "BOOTSTRAP_CONFIG_NAME=node-config-infra" >>/etc/sysconfig/${SERVICE_TYPE}-node {{else}} @@ -24,7 +17,7 @@ sed -i -e "s#--loglevel=2#--loglevel=4#" /etc/sysconfig/${SERVICE_TYPE}-node rm -rf /etc/etcd/* /etc/origin/master/* /etc/origin/node/* -( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) +( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz) cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt update-ca-trust diff --git a/parts/openshift/unstable/openshiftmasterscript.sh b/parts/openshift/unstable/openshiftmasterscript.sh index e0329b3384..b6b012976d 100644 --- a/parts/openshift/unstable/openshiftmasterscript.sh +++ b/parts/openshift/unstable/openshiftmasterscript.sh @@ -32,10 +32,35 @@ else COCKPIT_VERSION="latest" fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. +if grep -q ^ResourceDisk.Filesystem=xfs /etc/waagent.conf; then + # Bad image: docker and waagent are racing. Try to fix up. Leave this code + # until the bad images have gone away. + set +e + + # stop docker if it hasn't failed already + systemctl stop docker.service + + # wait until waagent has run mkfs and mounted /var/lib/docker + while ! mountpoint -q /var/lib/docker; do + sleep 1 + done + + # now roll us back. /var/lib/docker/* may be mounted if docker lost the + # race. + umount /var/lib/docker + umount /var/lib/docker/* + + # disable waagent from racing again if we reboot. + sed -i -e '/^ResourceDisk.Format=/ s/=.*/=n/' /etc/waagent.conf + set -e +fi + systemctl stop docker.service +# Also a bad image: the umount should also go away. 
+umount /var/lib/docker || true +mkfs.xfs -f /dev/sdb1 +echo '/dev/sdb1 /var/lib/docker xfs grpquota 0 0' >>/etc/fstab +mount /var/lib/docker restorecon -R /var/lib/docker systemctl start docker.service @@ -55,7 +80,7 @@ mkdir -p /etc/origin/master oc adm create-bootstrap-policy-file --filename=/etc/origin/master/policy.json -( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) +( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz) cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt update-ca-trust @@ -77,7 +102,7 @@ set -x ### # retrieve the public ip via dns for the router public ip and sub it in for the routingConfig.subdomain ### -routerLBHost="{{.RouterLBHostname}}" +routerLBHost={{ .RouterLBHostname | shellQuote }} routerLBIP=$(dig +short $routerLBHost) # NOTE: The version of openshift-ansible for origin defaults the ansible var @@ -94,12 +119,18 @@ else sed -i "s|PROMETHEUS_EXPORTER_VERSION|${PROMETHEUS_EXPORTER_VERSION}|g;" /tmp/ansible/azure-local-master-inventory.yml fi +MASTER_OREG_URL="$IMAGE_PREFIX/$IMAGE_TYPE" +if [[ -f /etc/origin/oreg_url ]]; then + MASTER_OREG_URL=$(cat /etc/origin/oreg_url) +fi + for i in /etc/origin/master/master-config.yaml /tmp/bootstrapconfigs/* /tmp/ansible/azure-local-master-inventory.yml; do sed -i "s/TEMPROUTERIP/${routerLBIP}/; s|IMAGE_PREFIX|$IMAGE_PREFIX|g; s|ANSIBLE_DEPLOY_TYPE|$ANSIBLE_DEPLOY_TYPE|g" $i sed -i "s|REGISTRY_STORAGE_AZURE_ACCOUNTNAME|${REGISTRY_STORAGE_AZURE_ACCOUNTNAME}|g; s|REGISTRY_STORAGE_AZURE_ACCOUNTKEY|${REGISTRY_STORAGE_AZURE_ACCOUNTKEY}|g" $i sed -i "s|COCKPIT_VERSION|${COCKPIT_VERSION}|g; s|COCKPIT_BASENAME|${COCKPIT_BASENAME}|g; s|COCKPIT_PREFIX|${COCKPIT_PREFIX}|g;" $i sed -i "s|VERSION|${VERSION}|g; s|SHORT_VER|${VERSION%.*}|g; s|SERVICE_TYPE|${SERVICE_TYPE}|g; s|IMAGE_TYPE|${IMAGE_TYPE}|g" $i sed -i "s|HOSTNAME|${HOSTNAME}|g;" $i + sed -i "s|MASTER_OREG_URL|${MASTER_OREG_URL}|g" $i done mkdir -p /root/.kube @@ -141,7 +172,7 @@ metadata: provisioner: 
kubernetes.io/azure-disk parameters: skuName: Premium_LRS - location: {{ .Location }} + location: {{ .Location | quote }} kind: managed EOF diff --git a/parts/openshift/unstable/openshiftnodescript.sh b/parts/openshift/unstable/openshiftnodescript.sh index a42839397e..7c8b79aff3 100644 --- a/parts/openshift/unstable/openshiftnodescript.sh +++ b/parts/openshift/unstable/openshiftnodescript.sh @@ -7,10 +7,35 @@ if [ -f "/etc/sysconfig/atomic-openshift-node" ]; then SERVICE_TYPE=atomic-openshift fi -# TODO: with WALinuxAgent>=v2.2.21 (https://github.com/Azure/WALinuxAgent/pull/1005) -# we should be able to append context=system_u:object_r:container_var_lib_t:s0 -# to ResourceDisk.MountOptions in /etc/waagent.conf and remove this stanza. +if grep -q ^ResourceDisk.Filesystem=xfs /etc/waagent.conf; then + # Bad image: docker and waagent are racing. Try to fix up. Leave this code + # until the bad images have gone away. + set +e + + # stop docker if it hasn't failed already + systemctl stop docker.service + + # wait until waagent has run mkfs and mounted /var/lib/docker + while ! mountpoint -q /var/lib/docker; do + sleep 1 + done + + # now roll us back. /var/lib/docker/* may be mounted if docker lost the + # race. + umount /var/lib/docker + umount /var/lib/docker/* + + # disable waagent from racing again if we reboot. + sed -i -e '/^ResourceDisk.Format=/ s/=.*/=n/' /etc/waagent.conf + set -e +fi + systemctl stop docker.service +# Also a bad image: the umount should also go away. 
+umount /var/lib/docker || true +mkfs.xfs -f /dev/sdb1 +echo '/dev/sdb1 /var/lib/docker xfs grpquota 0 0' >>/etc/fstab +mount /var/lib/docker restorecon -R /var/lib/docker systemctl start docker.service @@ -24,7 +49,7 @@ sed -i -e "s#DEBUG_LOGLEVEL=2#DEBUG_LOGLEVEL=4#" /etc/sysconfig/${SERVICE_TYPE}- rm -rf /etc/etcd/* /etc/origin/master/* -( cd / && base64 -d <<< {{ .ConfigBundle }} | tar -xz) +( cd / && base64 -d <<< {{ .ConfigBundle | shellQuote }} | tar -xz) cp /etc/origin/node/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt update-ca-trust diff --git a/pkg/acsengine/addons.go b/pkg/acsengine/addons.go index 83ffeced38..745b0e9347 100644 --- a/pkg/acsengine/addons.go +++ b/pkg/acsengine/addons.go @@ -72,6 +72,11 @@ func kubernetesAddonSettingsInit(profile *api.Properties) []kubernetesFeatureSet "kube-rescheduler-deployment.yaml", profile.OrchestratorProfile.KubernetesConfig.IsReschedulerEnabled(), }, + { + "kubernetesmasteraddons-azure-npm-daemonset.yaml", + "azure-npm-daemonset.yaml", + profile.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure && profile.OrchestratorProfile.KubernetesConfig.NetworkPlugin == NetworkPluginAzure, + }, { "kubernetesmasteraddons-calico-daemonset.yaml", "calico-daemonset.yaml", @@ -103,7 +108,7 @@ func kubernetesAddonSettingsInit(profile *api.Properties) []kubernetesFeatureSet profile.OrchestratorProfile.IsMetricsServerEnabled(), }, { - "omsagent-daemonset.yaml", + "kubernetesmasteraddons-omsagent-daemonset.yaml", "omsagent-daemonset.yaml", profile.OrchestratorProfile.IsContainerMonitoringEnabled(), }, @@ -112,6 +117,11 @@ func kubernetesAddonSettingsInit(profile *api.Properties) []kubernetesFeatureSet "azure-cni-networkmonitor.yaml", profile.OrchestratorProfile.IsAzureCNI(), }, + { + "kubernetesmaster-audit-policy.yaml", + "audit-policy.yaml", + common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.8.0"), + }, } } @@ -137,11 +147,6 @@ func 
kubernetesManifestSettingsInit(profile *api.Properties) []kubernetesFeature "pod-security-policy.yaml", helpers.IsTrueBoolPointer(profile.OrchestratorProfile.KubernetesConfig.EnablePodSecurityPolicy), }, - { - "kubernetesmaster-audit-policy.yaml", - "audit-policy.yaml", - common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.8.0"), - }, { "kubernetesmaster-kube-apiserver.yaml", "kube-apiserver.yaml", diff --git a/pkg/acsengine/const.go b/pkg/acsengine/const.go index 5ed6d8942e..afaa920f6d 100644 --- a/pkg/acsengine/const.go +++ b/pkg/acsengine/const.go @@ -50,7 +50,9 @@ const ( NetworkPolicyCalico = "calico" // NetworkPolicyCilium is the string expression for cilium network policy config option NetworkPolicyCilium = "cilium" - // NetworkPluginAzure is the string expression for Azure CNI network policy + // NetworkPolicyAzure is the string expression for Azure CNI network policy manager + NetworkPolicyAzure = "azure" + // NetworkPluginAzure is the string expression for Azure CNI plugin NetworkPluginAzure = "azure" // NetworkPluginKubenet is the string expression for kubenet network plugin NetworkPluginKubenet = "kubenet" @@ -123,7 +125,7 @@ const ( // DefaultOpenshiftOrchestratorName specifies the 3 character orchestrator code of the cluster template and affects resource naming. 
DefaultOpenshiftOrchestratorName = "ocp" // DefaultEtcdVersion specifies the default etcd version to install - DefaultEtcdVersion = "3.2.16" + DefaultEtcdVersion = "3.2.23" // DefaultEtcdDiskSize specifies the default size for Kubernetes master etcd disk volumes in GB DefaultEtcdDiskSize = "256" // DefaultEtcdDiskSizeGT3Nodes = size for Kubernetes master etcd disk volumes in GB if > 3 nodes @@ -136,12 +138,14 @@ const ( DefaultReschedulerAddonName = "rescheduler" // DefaultMetricsServerAddonName is the name of the kubernetes Metrics server addon deployment DefaultMetricsServerAddonName = "metrics-server" - // DefaultNVIDIADevicePluginAddonName is the name of the kubernetes NVIDIA Device Plugin daemon set - DefaultNVIDIADevicePluginAddonName = "nvidia-device-plugin" + // NVIDIADevicePluginAddonName is the name of the kubernetes NVIDIA Device Plugin daemon set + NVIDIADevicePluginAddonName = "nvidia-device-plugin" // ContainerMonitoringAddonName is the name of the kubernetes Container Monitoring addon deployment ContainerMonitoringAddonName = "container-monitoring" // AzureCNINetworkMonitoringAddonName is the name of the Azure CNI networkmonitor addon AzureCNINetworkMonitoringAddonName = "azure-cni-networkmonitor" + // AzureNetworkPolicyAddonName is the name of the Azure CNI networkmonitor addon + AzureNetworkPolicyAddonName = "azure-npm-daemonset" // DefaultKubernetesKubeletMaxPods is the max pods per kubelet DefaultKubernetesKubeletMaxPods = 110 // DefaultMasterEtcdServerPort is the default etcd server port for Kubernetes master nodes @@ -260,6 +264,7 @@ const ( kubernetesWinAgentVarsVMSS = "k8s/kuberneteswinagentresourcesvmss.t" masterOutputs = "masteroutputs.t" masterParams = "masterparams.t" + openshiftInfraResources = "openshift/infraresources.t" swarmBaseFile = "swarm/swarmbase.t" swarmParams = "swarm/swarmparams.t" swarmAgentResourcesVMAS = "swarm/swarmagentresourcesvmas.t" diff --git a/pkg/acsengine/defaults-apiserver.go b/pkg/acsengine/defaults-apiserver.go 
index 0a1b2bf862..28098db3f4 100644 --- a/pkg/acsengine/defaults-apiserver.go +++ b/pkg/acsengine/defaults-apiserver.go @@ -79,7 +79,7 @@ func setAPIServerConfig(cs *api.ContainerService) { // Audit Policy configuration if common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.8.0") { - staticAPIServerConfig["--audit-policy-file"] = "/etc/kubernetes/manifests/audit-policy.yaml" + defaultAPIServerConfig["--audit-policy-file"] = "/etc/kubernetes/addons/audit-policy.yaml" } // RBAC configuration @@ -143,9 +143,9 @@ func getDefaultAdmissionControls(cs *api.ContainerService) (string, string) { // Add new version case when applying admission controllers only available in that version or later switch { case common.IsKubernetesVersionGe(o.OrchestratorVersion, "1.9.0"): - admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" + admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages,ExtendedResourceToleration" default: - admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" + admissionControlValues = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,AlwaysPullImages" } // Pod Security Policy configuration diff --git a/pkg/acsengine/defaults-apiserver_test.go b/pkg/acsengine/defaults-apiserver_test.go index ee1520b331..447b21f86a 100644 --- a/pkg/acsengine/defaults-apiserver_test.go +++ b/pkg/acsengine/defaults-apiserver_test.go @@ -5,14 +5,13 @@ import ( "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/acs-engine/pkg/helpers" - "github.com/satori/go.uuid" ) const defaultTestClusterVer = "1.7.12" func 
TestAPIServerConfigEnableDataEncryptionAtRest(t *testing.T) { // Test EnableDataEncryptionAtRest = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableDataEncryptionAtRest = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -22,7 +21,7 @@ func TestAPIServerConfigEnableDataEncryptionAtRest(t *testing.T) { } // Test EnableDataEncryptionAtRest = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableDataEncryptionAtRest = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -34,7 +33,7 @@ func TestAPIServerConfigEnableDataEncryptionAtRest(t *testing.T) { func TestAPIServerConfigEnableEncryptionWithExternalKms(t *testing.T) { // Test EnableEncryptionWithExternalKms = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -44,7 +43,7 @@ func TestAPIServerConfigEnableEncryptionWithExternalKms(t *testing.T) { } // Test EnableEncryptionWithExternalKms = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms = helpers.PointerToBool(false) setAPIServerConfig(cs) a = 
cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -56,7 +55,7 @@ func TestAPIServerConfigEnableEncryptionWithExternalKms(t *testing.T) { func TestAPIServerConfigEnableAggregatedAPIs(t *testing.T) { // Test EnableAggregatedAPIs = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs = true setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -90,7 +89,7 @@ func TestAPIServerConfigEnableAggregatedAPIs(t *testing.T) { } // Test EnableAggregatedAPIs = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs = false setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -106,7 +105,7 @@ func TestAPIServerConfigEnableAggregatedAPIs(t *testing.T) { func TestAPIServerConfigUseCloudControllerManager(t *testing.T) { // Test UseCloudControllerManager = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -120,7 +119,7 @@ func TestAPIServerConfigUseCloudControllerManager(t *testing.T) { } // Test UseCloudControllerManager = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(false) 
setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -136,7 +135,7 @@ func TestAPIServerConfigUseCloudControllerManager(t *testing.T) { func TestAPIServerConfigHasAadProfile(t *testing.T) { // Test HasAadProfile = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.AADProfile = &api.AADProfile{ ServerAppID: "test-id", TenantID: "test-tenant", @@ -161,7 +160,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { } // Test OIDC user overrides - cs = createContainerService("testcluster", "1.7.12", 3, 2) + cs = CreateMockContainerService("testcluster", "1.7.12", 3, 2, false) cs.Properties.AADProfile = &api.AADProfile{ ServerAppID: "test-id", TenantID: "test-tenant", @@ -196,7 +195,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { } // Test China Cloud settings - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.AADProfile = &api.AADProfile{ ServerAppID: "test-id", TenantID: "test-tenant", @@ -234,7 +233,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { } // Test HasAadProfile = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig for _, key := range []string{"--oidc-username-claim", "--oidc-groups-claim", "--oidc-client-id", "--oidc-issuer-url"} { @@ -247,7 +246,7 @@ func TestAPIServerConfigHasAadProfile(t *testing.T) { func TestAPIServerConfigEnableRbac(t *testing.T) { // Test EnableRbac = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) 
cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(true) setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -257,7 +256,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { } // Test EnableRbac = true with 1.6 cluster - cs = createContainerService("testcluster", "1.6.11", 3, 2) + cs = CreateMockContainerService("testcluster", "1.6.11", 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(true) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -267,7 +266,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { } // Test EnableRbac = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -277,7 +276,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { } // Test EnableRbac = false with 1.6 cluster - cs = createContainerService("testcluster", "1.6.11", 3, 2) + cs = CreateMockContainerService("testcluster", "1.6.11", 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableRbac = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -289,7 +288,7 @@ func TestAPIServerConfigEnableRbac(t *testing.T) { func TestAPIServerConfigEnableSecureKubelet(t *testing.T) { // Test EnableSecureKubelet = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(true) setAPIServerConfig(cs) a := 
cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -303,7 +302,7 @@ func TestAPIServerConfigEnableSecureKubelet(t *testing.T) { } // Test EnableSecureKubelet = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(false) setAPIServerConfig(cs) a = cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -320,9 +319,9 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { version := "1.10.0" enableAdmissionPluginsKey := "--enable-admission-plugins" admissonControlKey := "--admission-control" - cs := createContainerService("testcluster", version, 3, 2) + cs := CreateMockContainerService("testcluster", version, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig = map[string]string{} - cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig[admissonControlKey] = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,DenyEscalatingExec,AlwaysPullImages" + cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig[admissonControlKey] = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,AlwaysPullImages,ExtendedResourceToleration" setAPIServerConfig(cs) a := cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -338,7 +337,7 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { // Test --admission-control for v1.9 and below version = "1.9.0" - cs = createContainerService("testcluster", version, 3, 2) + cs = CreateMockContainerService("testcluster", version, 3, 2, false) setAPIServerConfig(cs) a = 
cs.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig @@ -352,71 +351,3 @@ func TestAPIServerConfigDefaultAdmissionControls(t *testing.T) { t.Fatalf("Admission control key '%s' not set in API server config for version %s", enableAdmissionPluginsKey, version) } } - -func createContainerService(containerServiceName string, orchestratorVersion string, masterCount int, agentCount int) *api.ContainerService { - cs := api.ContainerService{} - cs.ID = uuid.NewV4().String() - cs.Location = "eastus" - cs.Name = containerServiceName - - cs.Properties = &api.Properties{} - - cs.Properties.MasterProfile = &api.MasterProfile{} - cs.Properties.MasterProfile.Count = masterCount - cs.Properties.MasterProfile.DNSPrefix = "testmaster" - cs.Properties.MasterProfile.VMSize = "Standard_D2_v2" - - cs.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{} - agentPool := &api.AgentPoolProfile{} - agentPool.Count = agentCount - agentPool.Name = "agentpool1" - agentPool.VMSize = "Standard_D2_v2" - agentPool.OSType = "Linux" - agentPool.AvailabilityProfile = "AvailabilitySet" - agentPool.StorageProfile = "StorageAccount" - - cs.Properties.AgentPoolProfiles = append(cs.Properties.AgentPoolProfiles, agentPool) - - cs.Properties.LinuxProfile = &api.LinuxProfile{ - AdminUsername: "azureuser", - SSH: struct { - PublicKeys []api.PublicKey `json:"publicKeys"` - }{}, - } - - cs.Properties.LinuxProfile.AdminUsername = "azureuser" - cs.Properties.LinuxProfile.SSH.PublicKeys = append( - cs.Properties.LinuxProfile.SSH.PublicKeys, api.PublicKey{KeyData: "test"}) - - cs.Properties.ServicePrincipalProfile = &api.ServicePrincipalProfile{} - cs.Properties.ServicePrincipalProfile.ClientID = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - cs.Properties.ServicePrincipalProfile.Secret = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - - cs.Properties.OrchestratorProfile = &api.OrchestratorProfile{} - cs.Properties.OrchestratorProfile.OrchestratorType = api.Kubernetes - 
cs.Properties.OrchestratorProfile.OrchestratorVersion = orchestratorVersion - cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{ - EnableSecureKubelet: helpers.PointerToBool(api.DefaultSecureKubeletEnabled), - EnableRbac: helpers.PointerToBool(api.DefaultRBACEnabled), - EtcdDiskSizeGB: DefaultEtcdDiskSize, - ServiceCIDR: DefaultKubernetesServiceCIDR, - DockerBridgeSubnet: DefaultDockerBridgeSubnet, - DNSServiceIP: DefaultKubernetesDNSServiceIP, - GCLowThreshold: DefaultKubernetesGCLowThreshold, - GCHighThreshold: DefaultKubernetesGCHighThreshold, - MaxPods: DefaultKubernetesMaxPodsVNETIntegrated, - ClusterSubnet: DefaultKubernetesSubnet, - ContainerRuntime: DefaultContainerRuntime, - NetworkPlugin: DefaultNetworkPlugin, - NetworkPolicy: DefaultNetworkPolicy, - EtcdVersion: DefaultEtcdVersion, - KubeletConfig: make(map[string]string), - } - - cs.Properties.CertificateProfile = &api.CertificateProfile{} - cs.Properties.CertificateProfile.CaCertificate = "cacert" - cs.Properties.CertificateProfile.KubeConfigCertificate = "kubeconfigcert" - cs.Properties.CertificateProfile.KubeConfigPrivateKey = "kubeconfigkey" - - return &cs -} diff --git a/pkg/acsengine/defaults-kubelet_test.go b/pkg/acsengine/defaults-kubelet_test.go index 7a8e68783f..b2c51d78c3 100644 --- a/pkg/acsengine/defaults-kubelet_test.go +++ b/pkg/acsengine/defaults-kubelet_test.go @@ -8,7 +8,7 @@ import ( ) func TestKubeletConfigDefaults(t *testing.T) { - cs := createContainerService("testcluster", "1.8.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.8.6", 3, 2, false) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig // TODO test all default config values @@ -22,7 +22,7 @@ func TestKubeletConfigDefaults(t *testing.T) { } } - cs = createContainerService("testcluster", "1.8.6", 3, 2) + cs = CreateMockContainerService("testcluster", "1.8.6", 3, 2, false) // TODO test all default overrides overrideVal := "/etc/override" 
cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig = map[string]string{ @@ -40,7 +40,7 @@ func TestKubeletConfigDefaults(t *testing.T) { func TestKubeletConfigUseCloudControllerManager(t *testing.T) { // Test UseCloudControllerManager = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(true) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -50,7 +50,7 @@ func TestKubeletConfigUseCloudControllerManager(t *testing.T) { } // Test UseCloudControllerManager = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager = helpers.PointerToBool(false) setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -63,7 +63,7 @@ func TestKubeletConfigUseCloudControllerManager(t *testing.T) { func TestKubeletConfigCloudConfig(t *testing.T) { // Test default value and custom value for --cloud-config - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig if k["--cloud-config"] != "/etc/kubernetes/azure.json" { @@ -71,7 +71,7 @@ func TestKubeletConfigCloudConfig(t *testing.T) { k["--cloud-config"]) } - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig["--cloud-config"] = "custom.json" setKubeletConfig(cs) k = 
cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -83,7 +83,7 @@ func TestKubeletConfigCloudConfig(t *testing.T) { func TestKubeletConfigAzureContainerRegistryCofig(t *testing.T) { // Test default value and custom value for --azure-container-registry-config - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig if k["--azure-container-registry-config"] != "/etc/kubernetes/azure.json" { @@ -91,7 +91,7 @@ func TestKubeletConfigAzureContainerRegistryCofig(t *testing.T) { k["--azure-container-registry-config"]) } - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig["--azure-container-registry-config"] = "custom.json" setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -103,7 +103,7 @@ func TestKubeletConfigAzureContainerRegistryCofig(t *testing.T) { func TestKubeletConfigNetworkPlugin(t *testing.T) { // Test NetworkPlugin = "kubenet" - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -113,7 +113,7 @@ func TestKubeletConfigNetworkPlugin(t *testing.T) { } // Test NetworkPlugin = "azure" - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginAzure setKubeletConfig(cs) k = 
cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -126,7 +126,7 @@ func TestKubeletConfigNetworkPlugin(t *testing.T) { func TestKubeletConfigEnableSecureKubelet(t *testing.T) { // Test EnableSecureKubelet = true - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(true) setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -144,7 +144,7 @@ func TestKubeletConfigEnableSecureKubelet(t *testing.T) { } // Test EnableSecureKubelet = false - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.EnableSecureKubelet = helpers.PointerToBool(false) setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -158,7 +158,7 @@ func TestKubeletConfigEnableSecureKubelet(t *testing.T) { } func TestKubeletMaxPods(t *testing.T) { - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginAzure setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -167,7 +167,7 @@ func TestKubeletMaxPods(t *testing.T) { NetworkPluginAzure, k["--max-pods"]) } - cs = createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs = CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet setKubeletConfig(cs) k = cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig @@ -178,7 +178,7 @@ func TestKubeletMaxPods(t 
*testing.T) { } func TestKubeletCalico(t *testing.T) { - cs := createContainerService("testcluster", defaultTestClusterVer, 3, 2) + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.NetworkPolicy = NetworkPolicyCalico setKubeletConfig(cs) k := cs.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig diff --git a/pkg/acsengine/defaults-scheduler_test.go b/pkg/acsengine/defaults-scheduler_test.go index bae224265d..e350ba95dd 100644 --- a/pkg/acsengine/defaults-scheduler_test.go +++ b/pkg/acsengine/defaults-scheduler_test.go @@ -5,7 +5,7 @@ import ( ) func TestSchedulerDefaultConfig(t *testing.T) { - cs := createContainerService("testcluster", "1.9.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.9.6", 3, 2, false) setSchedulerConfig(cs) s := cs.Properties.OrchestratorProfile.KubernetesConfig.SchedulerConfig for key, val := range staticSchedulerConfig { @@ -23,7 +23,7 @@ func TestSchedulerDefaultConfig(t *testing.T) { } func TestSchedulerUserConfig(t *testing.T) { - cs := createContainerService("testcluster", "1.9.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.9.6", 3, 2, false) assignmentMap := map[string]string{ "--scheduler-name": "my-custom-name", "--feature-gates": "APIListChunking=true,APIResponseCompression=true,Accelerators=true,AdvancedAuditing=true", @@ -39,7 +39,7 @@ func TestSchedulerUserConfig(t *testing.T) { } func TestSchedulerStaticConfig(t *testing.T) { - cs := createContainerService("testcluster", "1.9.6", 3, 2) + cs := CreateMockContainerService("testcluster", "1.9.6", 3, 2, false) cs.Properties.OrchestratorProfile.KubernetesConfig.SchedulerConfig = map[string]string{ "--kubeconfig": "user-override", "--leader-elect": "user-override", diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index 8130d3369f..890519cc62 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -15,16 +15,17 @@ import ( 
"github.com/Azure/acs-engine/pkg/helpers" "github.com/Azure/acs-engine/pkg/openshift/certgen" "github.com/blang/semver" + "github.com/pkg/errors" ) const ( // AzureCniPluginVer specifies version of Azure CNI plugin, which has been mirrored from // https://github.com/Azure/azure-container-networking/releases/download/${AZURE_PLUGIN_VER}/azure-vnet-cni-linux-amd64-${AZURE_PLUGIN_VER}.tgz - // to https://acs-mirror.azureedge.net/cni/ - AzureCniPluginVer = "v1.0.4" + // to https://acs-mirror.azureedge.net/cni + AzureCniPluginVer = "v1.0.7" // CNIPluginVer specifies the version of CNI implementation // https://github.com/containernetworking/plugins - CNIPluginVer = "v0.7.0" + CNIPluginVer = "v0.7.1" ) var ( @@ -67,7 +68,7 @@ var ( ImageOffer: "UbuntuServer", ImageSku: "16.04-LTS", ImagePublisher: "Canonical", - ImageVersion: "16.04.201806120", + ImageVersion: "16.04.201806220", } //DefaultRHELOSImageConfig is the RHEL Linux distribution. @@ -307,10 +308,10 @@ var ( // DefaultNVIDIADevicePluginAddonsConfig is the default NVIDIA Device Plugin Kubernetes addon Config DefaultNVIDIADevicePluginAddonsConfig = api.KubernetesAddon{ - Name: DefaultNVIDIADevicePluginAddonName, + Name: NVIDIADevicePluginAddonName, Containers: []api.KubernetesContainerSpec{ { - Name: DefaultNVIDIADevicePluginAddonName, + Name: NVIDIADevicePluginAddonName, }, }, } @@ -326,7 +327,7 @@ var ( Containers: []api.KubernetesContainerSpec{ { Name: "omsagent", - Image: "microsoft/oms:ciprod06072018", + Image: "microsoft/oms:June21st", CPURequests: "50m", MemoryRequests: "100Mi", CPULimits: "150m", @@ -344,6 +345,16 @@ var ( }, }, } + + // DefaultAzureNetworkPolicyAddonsConfig is the default Azure NetworkPolicy addon config + DefaultAzureNetworkPolicyAddonsConfig = api.KubernetesAddon{ + Name: AzureNetworkPolicyAddonName, + Containers: []api.KubernetesContainerSpec{ + { + Name: AzureNetworkPolicyAddonName, + }, + }, + } ) // setPropertiesDefaults for the 
container Properties, returns true if certs are generated @@ -394,8 +405,10 @@ func setOrchestratorDefaults(cs *api.ContainerService) { // and set a default network policy enforcement configuration switch o.KubernetesConfig.NetworkPolicy { case NetworkPluginAzure: - o.KubernetesConfig.NetworkPlugin = NetworkPluginAzure - o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy + if o.KubernetesConfig.NetworkPlugin == "" { + o.KubernetesConfig.NetworkPlugin = NetworkPluginAzure + o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy + } case NetworkPolicyNone: o.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet o.KubernetesConfig.NetworkPolicy = DefaultNetworkPolicy @@ -417,6 +430,7 @@ func setOrchestratorDefaults(cs *api.ContainerService) { DefaultNVIDIADevicePluginAddonsConfig, DefaultContainerMonitoringAddonsConfig, DefaultAzureCNINetworkMonitorAddonsConfig, + DefaultAzureNetworkPolicyAddonsConfig, } enforceK8sAddonOverrides(o.KubernetesConfig.Addons, o) } else { @@ -453,7 +467,7 @@ func setOrchestratorDefaults(cs *api.ContainerService) { m = getAddonsIndexByName(o.KubernetesConfig.Addons, DefaultMetricsServerAddonName) o.KubernetesConfig.Addons[m].Enabled = k8sVersionMetricsServerAddonEnabled(o) } - n := getAddonsIndexByName(o.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) + n := getAddonsIndexByName(o.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) if n < 0 { // Provide default acs-engine config for NVIDIA Device Plugin o.KubernetesConfig.Addons = append(o.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonsConfig) @@ -468,6 +482,11 @@ func setOrchestratorDefaults(cs *api.ContainerService) { // Provide default acs-engine config for Azure CNI containernetworking Device Plugin o.KubernetesConfig.Addons = append(o.KubernetesConfig.Addons, DefaultAzureCNINetworkMonitorAddonsConfig) } + aNP := getAddonsIndexByName(o.KubernetesConfig.Addons, AzureNetworkPolicyAddonName) + if aNP < 0 { + // Provide default acs-engine config for Azure 
NetworkPolicy addon + o.KubernetesConfig.Addons = append(o.KubernetesConfig.Addons, DefaultAzureNetworkPolicyAddonsConfig) + } } if o.KubernetesConfig.KubernetesImageBase == "" { o.KubernetesConfig.KubernetesImageBase = cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase @@ -563,7 +582,7 @@ func setOrchestratorDefaults(cs *api.ContainerService) { if a.OrchestratorProfile.KubernetesConfig.Addons[m].IsEnabled(api.DefaultMetricsServerAddonEnabled) { a.OrchestratorProfile.KubernetesConfig.Addons[m] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[m], DefaultMetricsServerAddonsConfig) } - n := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) + n := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) if a.OrchestratorProfile.KubernetesConfig.Addons[n].IsEnabled(api.DefaultNVIDIADevicePluginAddonEnabled) { a.OrchestratorProfile.KubernetesConfig.Addons[n] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[n], DefaultNVIDIADevicePluginAddonsConfig) } @@ -572,9 +591,13 @@ func setOrchestratorDefaults(cs *api.ContainerService) { a.OrchestratorProfile.KubernetesConfig.Addons[cm] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[cm], DefaultContainerMonitoringAddonsConfig) } aN := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) - if a.OrchestratorProfile.KubernetesConfig.Addons[aN].IsEnabled(a.OrchestratorProfile.IsAzureCNI()) { + if a.OrchestratorProfile.KubernetesConfig.Addons[aN].IsEnabled(api.DefaultAzureCNINetworkMonitoringAddonEnabled) { a.OrchestratorProfile.KubernetesConfig.Addons[aN] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[aN], DefaultAzureCNINetworkMonitorAddonsConfig) } + aNP := getAddonsIndexByName(a.OrchestratorProfile.KubernetesConfig.Addons, AzureNetworkPolicyAddonName) + if 
a.OrchestratorProfile.KubernetesConfig.Addons[aNP].IsEnabled(a.OrchestratorProfile.KubernetesConfig.NetworkPlugin == NetworkPluginAzure && a.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure) { + a.OrchestratorProfile.KubernetesConfig.Addons[aNP] = assignDefaultAddonVals(a.OrchestratorProfile.KubernetesConfig.Addons[aNP], DefaultAzureNetworkPolicyAddonsConfig) + } if o.KubernetesConfig.PrivateCluster == nil { o.KubernetesConfig.PrivateCluster = &api.PrivateCluster{} @@ -861,7 +884,7 @@ func setDefaultCerts(a *api.Properties) (bool, error) { firstMasterIP := net.ParseIP(a.MasterProfile.FirstConsecutiveStaticIP).To4() if firstMasterIP == nil { - return false, fmt.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", a.MasterProfile.FirstConsecutiveStaticIP) + return false, errors.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", a.MasterProfile.FirstConsecutiveStaticIP) } ips := []net.IP{firstMasterIP} @@ -1105,6 +1128,8 @@ func enforceK8sAddonOverrides(addons []api.KubernetesAddon, o *api.OrchestratorP o.KubernetesConfig.Addons[m].Enabled = k8sVersionMetricsServerAddonEnabled(o) aN := getAddonsIndexByName(o.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) o.KubernetesConfig.Addons[aN].Enabled = azureCNINetworkMonitorAddonEnabled(o) + aNP := getAddonsIndexByName(o.KubernetesConfig.Addons, AzureNetworkPolicyAddonName) + o.KubernetesConfig.Addons[aNP].Enabled = azureNetworkPolicyAddonEnabled(o) } func k8sVersionMetricsServerAddonEnabled(o *api.OrchestratorProfile) *bool { @@ -1115,6 +1140,10 @@ func azureCNINetworkMonitorAddonEnabled(o *api.OrchestratorProfile) *bool { return helpers.PointerToBool(o.IsAzureCNI()) } +func azureNetworkPolicyAddonEnabled(o *api.OrchestratorProfile) *bool { + return helpers.PointerToBool(o.KubernetesConfig.NetworkPlugin == NetworkPluginAzure && o.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure) +} + func generateEtcdEncryptionKey() string { b := 
make([]byte, 32) rand.Read(b) diff --git a/pkg/acsengine/defaults_test.go b/pkg/acsengine/defaults_test.go index f2578c6cc5..250c042f0a 100644 --- a/pkg/acsengine/defaults_test.go +++ b/pkg/acsengine/defaults_test.go @@ -565,23 +565,28 @@ func TestIsAzureCNINetworkmonitorAddon(t *testing.T) { properties := mockCS.Properties properties.OrchestratorProfile.OrchestratorType = "Kubernetes" properties.MasterProfile.Count = 1 + properties.OrchestratorProfile.KubernetesConfig.Addons = []api.KubernetesAddon{ + getMockAddon(AzureCNINetworkMonitoringAddonName), + } properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "azure" setOrchestratorDefaults(&mockCS) i := getAddonsIndexByName(properties.OrchestratorProfile.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) if !helpers.IsTrueBoolPointer(properties.OrchestratorProfile.KubernetesConfig.Addons[i].Enabled) { - t.Fatalf("Azure CNI network plugin configuration should add Azure CNI networkmonitor addon") + t.Fatalf("Azure CNI networkmonitor addon should be present") } + mockCS = getMockBaseContainerService("1.10.3") properties = mockCS.Properties properties.OrchestratorProfile.OrchestratorType = "Kubernetes" properties.MasterProfile.Count = 1 - properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "kubenet" + properties.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "azure" + properties.OrchestratorProfile.KubernetesConfig.Addons = []api.KubernetesAddon{} setOrchestratorDefaults(&mockCS) i = getAddonsIndexByName(properties.OrchestratorProfile.KubernetesConfig.Addons, AzureCNINetworkMonitoringAddonName) if helpers.IsTrueBoolPointer(properties.OrchestratorProfile.KubernetesConfig.Addons[i].Enabled) { - t.Fatalf("Azure CNI networkmonitor addon should only be present in Azure CNI configurations") + t.Fatalf("Azure CNI networkmonitor addon should only be present if explicitly configured") } } diff --git a/pkg/acsengine/engine.go b/pkg/acsengine/engine.go index 89b1cd265a..f7391c7158 100644 --- 
a/pkg/acsengine/engine.go +++ b/pkg/acsengine/engine.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/common" "github.com/Azure/acs-engine/pkg/helpers" "github.com/ghodss/yaml" + "github.com/pkg/errors" ) var commonTemplateFiles = []string{agentOutputs, agentParams, classicParams, masterOutputs, iaasOutputs, masterParams, windowsParams} @@ -32,6 +33,7 @@ var swarmTemplateFiles = []string{swarmBaseFile, swarmParams, swarmAgentResource var swarmModeTemplateFiles = []string{swarmBaseFile, swarmParams, swarmAgentResourcesVMAS, swarmAgentVars, swarmAgentResourcesVMSS, swarmAgentResourcesClassic, swarmBaseFile, swarmMasterResources, swarmMasterVars, swarmWinAgentResourcesVMAS, swarmWinAgentResourcesVMSS} var openshiftTemplateFiles = append( kubernetesTemplateFiles, + openshiftInfraResources, openshiftNodeScript, openshiftMasterScript, openshift39NodeScript, @@ -64,14 +66,14 @@ func GenerateClusterID(properties *api.Properties) string { // GenerateKubeConfig returns a JSON string representing the KubeConfig func GenerateKubeConfig(properties *api.Properties, location string) (string, error) { if properties == nil { - return "", fmt.Errorf("Properties nil in GenerateKubeConfig") + return "", errors.New("Properties nil in GenerateKubeConfig") } if properties.CertificateProfile == nil { - return "", fmt.Errorf("CertificateProfile property may not be nil in GenerateKubeConfig") + return "", errors.New("CertificateProfile property may not be nil in GenerateKubeConfig") } b, err := Asset(kubeConfigJSON) if err != nil { - return "", fmt.Errorf("error reading kube config template file %s: %s", kubeConfigJSON, err.Error()) + return "", errors.Wrapf(err, "error reading kube config template file %s", kubeConfigJSON) } kubeconfig := string(b) // variable replacement @@ -84,7 +86,7 @@ func GenerateKubeConfig(properties *api.Properties, location string) (string, er // more than 1 master, use the internal lb IP firstMasterIP := 
net.ParseIP(properties.MasterProfile.FirstConsecutiveStaticIP).To4() if firstMasterIP == nil { - return "", fmt.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", properties.MasterProfile.FirstConsecutiveStaticIP) + return "", errors.Errorf("MasterProfile.FirstConsecutiveStaticIP '%s' is an invalid IP address", properties.MasterProfile.FirstConsecutiveStaticIP) } lbIP := net.IP{firstMasterIP[0], firstMasterIP[1], firstMasterIP[2], firstMasterIP[3] + byte(DefaultInternalLbStaticIPOffset)} kubeconfig = strings.Replace(kubeconfig, "{{WrapAsVerbatim \"reference(concat('Microsoft.Network/publicIPAddresses/', variables('masterPublicIPAddressName'))).dnsSettings.fqdn\"}}", lbIP.String(), -1) @@ -270,7 +272,7 @@ func addSecret(m paramsMap, k string, v interface{}, encode bool) { func getStorageAccountType(sizeName string) (string, error) { spl := strings.Split(sizeName, "_") if len(spl) < 2 { - return "", fmt.Errorf("Invalid sizeName: %s", sizeName) + return "", errors.Errorf("Invalid sizeName: %s", sizeName) } capability := spl[1] if strings.Contains(strings.ToLower(capability), "s") { @@ -1200,7 +1202,7 @@ func validateProfileOptedForExtension(extensionName string, profileExtensions [] func getLinkedTemplateTextForURL(rootURL, orchestrator, extensionName, version, query string) (string, error) { supportsExtension, err := orchestratorSupportsExtension(rootURL, orchestrator, extensionName, version, query) if !supportsExtension { - return "", fmt.Errorf("Extension not supported for orchestrator. 
Error: %s", err) + return "", errors.Wrap(err, "Extension not supported for orchestrator") } templateLinkBytes, err := getExtensionResource(rootURL, extensionName, version, "template-link.json", query) @@ -1220,11 +1222,11 @@ func orchestratorSupportsExtension(rootURL, orchestrator, extensionName, version var supportedOrchestrators []string err = json.Unmarshal(orchestratorBytes, &supportedOrchestrators) if err != nil { - return false, fmt.Errorf("Unable to parse supported-orchestrators.json for Extension %s Version %s", extensionName, version) + return false, errors.Errorf("Unable to parse supported-orchestrators.json for Extension %s Version %s", extensionName, version) } if !stringInSlice(orchestrator, supportedOrchestrators) { - return false, fmt.Errorf("Orchestrator: %s not in list of supported orchestrators for Extension: %s Version %s", orchestrator, extensionName, version) + return false, errors.Errorf("Orchestrator: %s not in list of supported orchestrators for Extension: %s Version %s", orchestrator, extensionName, version) } return true, nil @@ -1235,18 +1237,18 @@ func getExtensionResource(rootURL, extensionName, version, fileName, query strin res, err := http.Get(requestURL) if err != nil { - return nil, fmt.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s Error: %s", extensionName, version, fileName, requestURL, err) + return nil, errors.Wrapf(err, "Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s", extensionName, version, fileName, requestURL) } defer res.Body.Close() if res.StatusCode != 200 { - return nil, fmt.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s StatusCode: %s: Status: %s", extensionName, version, fileName, requestURL, strconv.Itoa(res.StatusCode), res.Status) + return nil, errors.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s 
StatusCode: %s: Status: %s", extensionName, version, fileName, requestURL, strconv.Itoa(res.StatusCode), res.Status) } body, err := ioutil.ReadAll(res.Body) if err != nil { - return nil, fmt.Errorf("Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s Error: %s", extensionName, version, fileName, requestURL, err) + return nil, errors.Wrapf(err, "Unable to GET extension resource for extension: %s with version %s with filename %s at URL: %s", extensionName, version, fileName, requestURL) } return body, nil diff --git a/pkg/acsengine/k8s_versions.go b/pkg/acsengine/k8s_versions.go index b917303826..57d0fc9c62 100644 --- a/pkg/acsengine/k8s_versions.go +++ b/pkg/acsengine/k8s_versions.go @@ -13,17 +13,19 @@ var k8sComponentVersions = map[string]map[string]string{ "dashboard": "kubernetes-dashboard-amd64:v1.8.3", "exechealthz": "exechealthz-amd64:1.2", "addon-resizer": "addon-resizer:1.8.1", - "heapster": "heapster-amd64:v1.5.1", + "heapster": "heapster-amd64:v1.5.3", "metrics-server": "metrics-server-amd64:v0.2.1", - "kube-dns": "k8s-dns-kube-dns-amd64:1.14.8", + "kube-dns": "k8s-dns-kube-dns-amd64:1.14.10", "addon-manager": "kube-addon-manager-amd64:v8.6", - "dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "dnsmasq": "k8s-dns-dnsmasq-nanny-amd64:1.14.10", "pause": "pause-amd64:3.1", "tiller": "tiller:v2.8.1", - "rescheduler": "rescheduler:v0.3.1", + "rescheduler": "rescheduler:v0.4.0", "aci-connector": "virtual-kubelet:latest", ContainerMonitoringAddonName: "oms:ciprod05082018", AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4", + "cluster-autoscaler": "cluster-autoscaler:v1.3.0", + NVIDIADevicePluginAddonName: "k8s-device-plugin:1.11", "nodestatusfreq": DefaultKubernetesNodeStatusUpdateFrequency, "nodegraceperiod": DefaultKubernetesCtrlMgrNodeMonitorGracePeriod, "podeviction": DefaultKubernetesCtrlMgrPodEvictionTimeout, @@ -54,7 +56,7 @@ var k8sComponentVersions = map[string]map[string]string{ 
ContainerMonitoringAddonName: "oms:ciprod05082018", AzureCNINetworkMonitoringAddonName: "networkmonitor:v0.0.4", "cluster-autoscaler": "cluster-autoscaler:v1.2.2", - "nvidia-device-plugin": "k8s-device-plugin:1.10", + NVIDIADevicePluginAddonName: "k8s-device-plugin:1.10", "nodestatusfreq": DefaultKubernetesNodeStatusUpdateFrequency, "nodegraceperiod": DefaultKubernetesCtrlMgrNodeMonitorGracePeriod, "podeviction": DefaultKubernetesCtrlMgrPodEvictionTimeout, @@ -263,6 +265,7 @@ func getK8sVersionComponents(version string, overrides map[string]string) map[st DefaultACIConnectorAddonName: k8sComponentVersions["1.11"]["aci-connector"], ContainerMonitoringAddonName: k8sComponentVersions["1.11"][ContainerMonitoringAddonName], AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.11"][AzureCNINetworkMonitoringAddonName], + DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.11"]["cluster-autoscaler"], "nodestatusfreq": k8sComponentVersions["1.11"]["nodestatusfreq"], "nodegraceperiod": k8sComponentVersions["1.11"]["nodegraceperiod"], "podeviction": k8sComponentVersions["1.11"]["podeviction"], @@ -275,6 +278,7 @@ func getK8sVersionComponents(version string, overrides map[string]string) map[st "ratelimitbucket": k8sComponentVersions["1.11"]["ratelimitbucket"], "gchighthreshold": k8sComponentVersions["1.11"]["gchighthreshold"], "gclowthreshold": k8sComponentVersions["1.11"]["gclowthreshold"], + NVIDIADevicePluginAddonName: k8sComponentVersions["1.11"][NVIDIADevicePluginAddonName], } case "1.10": ret = map[string]string{ @@ -309,7 +313,7 @@ func getK8sVersionComponents(version string, overrides map[string]string) map[st "gchighthreshold": k8sComponentVersions["1.10"]["gchighthreshold"], "gclowthreshold": k8sComponentVersions["1.10"]["gclowthreshold"], DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.10"]["cluster-autoscaler"], - DefaultNVIDIADevicePluginAddonName: k8sComponentVersions["1.10"]["nvidia-device-plugin"], + NVIDIADevicePluginAddonName: 
k8sComponentVersions["1.10"][NVIDIADevicePluginAddonName], } case "1.9": ret = map[string]string{ diff --git a/pkg/acsengine/k8s_versions_test.go b/pkg/acsengine/k8s_versions_test.go index a9038d0da7..a63c2a4dd3 100644 --- a/pkg/acsengine/k8s_versions_test.go +++ b/pkg/acsengine/k8s_versions_test.go @@ -29,6 +29,8 @@ func TestGetK8sVersionComponents(t *testing.T) { DefaultACIConnectorAddonName: k8sComponentVersions["1.11"]["aci-connector"], ContainerMonitoringAddonName: k8sComponentVersions["1.11"][ContainerMonitoringAddonName], AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.11"][AzureCNINetworkMonitoringAddonName], + DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.11"]["cluster-autoscaler"], + NVIDIADevicePluginAddonName: k8sComponentVersions["1.11"][NVIDIADevicePluginAddonName], "nodestatusfreq": k8sComponentVersions["1.11"]["nodestatusfreq"], "nodegraceperiod": k8sComponentVersions["1.11"]["nodegraceperiod"], "podeviction": k8sComponentVersions["1.11"]["podeviction"], @@ -49,6 +51,51 @@ func TestGetK8sVersionComponents(t *testing.T) { } } + oneDotTenDotZero := getK8sVersionComponents("1.10.0", nil) + if oneDotTenDotZero == nil { + t.Fatalf("getK8sVersionComponents() should not return nil for valid version") + } + expected = map[string]string{ + "hyperkube": "hyperkube-amd64:v1.10.0", + "ccm": "cloud-controller-manager-amd64:v1.10.0", + "windowszip": "v1.10.0-1int.zip", + "dockerEngineVersion": k8sComponentVersions["1.10"]["dockerEngine"], + DefaultDashboardAddonName: k8sComponentVersions["1.10"]["dashboard"], + "exechealthz": k8sComponentVersions["1.10"]["exechealthz"], + "addonresizer": k8sComponentVersions["1.10"]["addon-resizer"], + "heapster": k8sComponentVersions["1.10"]["heapster"], + DefaultMetricsServerAddonName: k8sComponentVersions["1.10"]["metrics-server"], + "dns": k8sComponentVersions["1.10"]["kube-dns"], + "addonmanager": k8sComponentVersions["1.10"]["addon-manager"], + "dnsmasq": k8sComponentVersions["1.10"]["dnsmasq"], + 
"pause": k8sComponentVersions["1.10"]["pause"], + DefaultTillerAddonName: k8sComponentVersions["1.10"]["tiller"], + DefaultReschedulerAddonName: k8sComponentVersions["1.10"]["rescheduler"], + DefaultACIConnectorAddonName: k8sComponentVersions["1.10"]["aci-connector"], + ContainerMonitoringAddonName: k8sComponentVersions["1.10"][ContainerMonitoringAddonName], + AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.10"][AzureCNINetworkMonitoringAddonName], + DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.10"]["cluster-autoscaler"], + NVIDIADevicePluginAddonName: k8sComponentVersions["1.10"][NVIDIADevicePluginAddonName], + "nodestatusfreq": k8sComponentVersions["1.10"]["nodestatusfreq"], + "nodegraceperiod": k8sComponentVersions["1.10"]["nodegraceperiod"], + "podeviction": k8sComponentVersions["1.10"]["podeviction"], + "routeperiod": k8sComponentVersions["1.10"]["routeperiod"], + "backoffretries": k8sComponentVersions["1.10"]["backoffretries"], + "backoffjitter": k8sComponentVersions["1.10"]["backoffjitter"], + "backoffduration": k8sComponentVersions["1.10"]["backoffduration"], + "backoffexponent": k8sComponentVersions["1.10"]["backoffexponent"], + "ratelimitqps": k8sComponentVersions["1.10"]["ratelimitqps"], + "ratelimitbucket": k8sComponentVersions["1.10"]["ratelimitbucket"], + "gchighthreshold": k8sComponentVersions["1.10"]["gchighthreshold"], + "gclowthreshold": k8sComponentVersions["1.10"]["gclowthreshold"], + } + + for k, v := range oneDotTenDotZero { + if expected[k] != v { + t.Fatalf("getK8sVersionComponents() returned an unexpected map[string]string value for k8s 1.10.0: %s = %s", k, oneDotTenDotZero[k]) + } + } + oneDotNineDotThree := getK8sVersionComponents("1.9.3", nil) if oneDotNineDotThree == nil { t.Fatalf("getK8sVersionComponents() should not return nil for valid version") @@ -70,7 +117,7 @@ func TestGetK8sVersionComponents(t *testing.T) { DefaultTillerAddonName: k8sComponentVersions["1.9"]["tiller"], DefaultReschedulerAddonName: 
k8sComponentVersions["1.9"]["rescheduler"], DefaultACIConnectorAddonName: k8sComponentVersions["1.9"]["aci-connector"], - ContainerMonitoringAddonName: k8sComponentVersions["1.11"][ContainerMonitoringAddonName], + ContainerMonitoringAddonName: k8sComponentVersions["1.9"][ContainerMonitoringAddonName], AzureCNINetworkMonitoringAddonName: k8sComponentVersions["1.9"][AzureCNINetworkMonitoringAddonName], DefaultClusterAutoscalerAddonName: k8sComponentVersions["1.9"]["cluster-autoscaler"], "nodestatusfreq": k8sComponentVersions["1.9"]["nodestatusfreq"], diff --git a/pkg/acsengine/mocks.go b/pkg/acsengine/mocks.go new file mode 100644 index 0000000000..8831773deb --- /dev/null +++ b/pkg/acsengine/mocks.go @@ -0,0 +1,90 @@ +package acsengine + +import ( + "github.com/Azure/acs-engine/pkg/api" + "github.com/Azure/acs-engine/pkg/helpers" + "github.com/satori/go.uuid" +) + +// CreateMockContainerService returns a mock container service for testing purposes +func CreateMockContainerService(containerServiceName, orchestratorVersion string, masterCount, agentCount int, certs bool) *api.ContainerService { + cs := api.ContainerService{} + cs.ID = uuid.NewV4().String() + cs.Location = "eastus" + cs.Name = containerServiceName + + cs.Properties = &api.Properties{} + + cs.Properties.MasterProfile = &api.MasterProfile{} + cs.Properties.MasterProfile.Count = masterCount + cs.Properties.MasterProfile.DNSPrefix = "testmaster" + cs.Properties.MasterProfile.VMSize = "Standard_D2_v2" + + cs.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{} + agentPool := &api.AgentPoolProfile{} + agentPool.Count = agentCount + agentPool.Name = "agentpool1" + agentPool.VMSize = "Standard_D2_v2" + agentPool.OSType = "Linux" + agentPool.AvailabilityProfile = "AvailabilitySet" + agentPool.StorageProfile = "StorageAccount" + + cs.Properties.AgentPoolProfiles = append(cs.Properties.AgentPoolProfiles, agentPool) + + cs.Properties.LinuxProfile = &api.LinuxProfile{ + AdminUsername: 
"azureuser", + SSH: struct { + PublicKeys []api.PublicKey `json:"publicKeys"` + }{}, + } + + cs.Properties.LinuxProfile.AdminUsername = "azureuser" + cs.Properties.LinuxProfile.SSH.PublicKeys = append( + cs.Properties.LinuxProfile.SSH.PublicKeys, api.PublicKey{KeyData: "test"}) + + cs.Properties.ServicePrincipalProfile = &api.ServicePrincipalProfile{} + cs.Properties.ServicePrincipalProfile.ClientID = "DEC923E3-1EF1-4745-9516-37906D56DEC4" + cs.Properties.ServicePrincipalProfile.Secret = "DEC923E3-1EF1-4745-9516-37906D56DEC4" + + cs.Properties.OrchestratorProfile = &api.OrchestratorProfile{} + cs.Properties.OrchestratorProfile.OrchestratorType = api.Kubernetes + cs.Properties.OrchestratorProfile.OrchestratorVersion = orchestratorVersion + cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{ + EnableSecureKubelet: helpers.PointerToBool(api.DefaultSecureKubeletEnabled), + EnableRbac: helpers.PointerToBool(api.DefaultRBACEnabled), + EtcdDiskSizeGB: DefaultEtcdDiskSize, + ServiceCIDR: DefaultKubernetesServiceCIDR, + DockerBridgeSubnet: DefaultDockerBridgeSubnet, + DNSServiceIP: DefaultKubernetesDNSServiceIP, + GCLowThreshold: DefaultKubernetesGCLowThreshold, + GCHighThreshold: DefaultKubernetesGCHighThreshold, + MaxPods: DefaultKubernetesMaxPodsVNETIntegrated, + ClusterSubnet: DefaultKubernetesSubnet, + ContainerRuntime: DefaultContainerRuntime, + NetworkPlugin: DefaultNetworkPlugin, + NetworkPolicy: DefaultNetworkPolicy, + EtcdVersion: DefaultEtcdVersion, + KubeletConfig: make(map[string]string), + } + + cs.Properties.CertificateProfile = &api.CertificateProfile{} + if certs { + cs.Properties.CertificateProfile.CaCertificate = "cacert" + cs.Properties.CertificateProfile.CaPrivateKey = "cakey" + cs.Properties.CertificateProfile.KubeConfigCertificate = "kubeconfigcert" + cs.Properties.CertificateProfile.KubeConfigPrivateKey = "kubeconfigkey" + cs.Properties.CertificateProfile.APIServerCertificate = "apiservercert" + 
cs.Properties.CertificateProfile.APIServerPrivateKey = "apiserverkey" + cs.Properties.CertificateProfile.ClientCertificate = "clientcert" + cs.Properties.CertificateProfile.ClientPrivateKey = "clientkey" + cs.Properties.CertificateProfile.EtcdServerCertificate = "etcdservercert" + cs.Properties.CertificateProfile.EtcdServerPrivateKey = "etcdserverkey" + cs.Properties.CertificateProfile.EtcdClientCertificate = "etcdclientcert" + cs.Properties.CertificateProfile.EtcdClientPrivateKey = "etcdclientkey" + cs.Properties.CertificateProfile.EtcdPeerCertificates = []string{"etcdpeercert1", "etcdpeercert2", "etcdpeercert3", "etcdpeercert4", "etcdpeercert5"} + cs.Properties.CertificateProfile.EtcdPeerPrivateKeys = []string{"etcdpeerkey1", "etcdpeerkey2", "etcdpeerkey3", "etcdpeerkey4", "etcdpeerkey5"} + + } + + return &cs +} diff --git a/pkg/acsengine/output.go b/pkg/acsengine/output.go index dc7e0373a6..4df91cbf76 100644 --- a/pkg/acsengine/output.go +++ b/pkg/acsengine/output.go @@ -114,6 +114,9 @@ func (w *ArtifactWriter) WriteTLSArtifacts(containerService *api.ContainerServic return e } for i := 0; i < properties.MasterProfile.Count; i++ { + if len(properties.CertificateProfile.EtcdPeerPrivateKeys) <= i || len(properties.CertificateProfile.EtcdPeerCertificates) <= i { + return fmt.Errorf("missing etcd peer certificate/key pair") + } k := "etcdpeer" + strconv.Itoa(i) + ".key" if e := f.SaveFileString(artifactsDir, k, properties.CertificateProfile.EtcdPeerPrivateKeys[i]); e != nil { return e diff --git a/pkg/acsengine/output_test.go b/pkg/acsengine/output_test.go new file mode 100644 index 0000000000..29701ed508 --- /dev/null +++ b/pkg/acsengine/output_test.go @@ -0,0 +1,83 @@ +package acsengine + +import ( + "fmt" + "os" + "path" + "testing" + + "github.com/Azure/acs-engine/pkg/i18n" +) + +func TestWriteTLSArtifacts(t *testing.T) { + + cs := CreateMockContainerService("testcluster", defaultTestClusterVer, 1, 2, true) + writer := &ArtifactWriter{ + Translator: 
&i18n.Translator{ + Locale: nil, + }, + } + dir := "_testoutputdir" + defaultDir := fmt.Sprintf("%s-%s", cs.Properties.OrchestratorProfile.OrchestratorType, GenerateClusterID(cs.Properties)) + defaultDir = path.Join("_output", defaultDir) + defer os.RemoveAll(dir) + defer os.RemoveAll(defaultDir) + + // Generate apimodel and azure deploy artifacts without certs + err := writer.WriteTLSArtifacts(cs, "vlabs", "fake template", "fake parameters", dir, false, false) + + if err != nil { + t.Fatalf("unexpected error trying to write TLS artifacts: %s", err.Error()) + } + + expectedFiles := []string{"apimodel.json", "azuredeploy.json", "azuredeploy.parameters.json"} + + for _, f := range expectedFiles { + if _, err := os.Stat(dir + "/" + f); os.IsNotExist(err) { + t.Fatalf("expected file %s/%s to be generated by WriteTLSArtifacts", dir, f) + } + } + + os.RemoveAll(dir) + + // Generate parameters only and certs + err = writer.WriteTLSArtifacts(cs, "vlabs", "fake template", "fake parameters", "", true, true) + if err != nil { + t.Fatalf("unexpected error trying to write TLS artifacts: %s", err.Error()) + } + + if _, err := os.Stat(defaultDir + "/apimodel.json"); !os.IsNotExist(err) { + t.Fatalf("expected file %s/apimodel.json not to be generated by WriteTLSArtifacts with parametersOnly set to true", defaultDir) + } + + if _, err := os.Stat(defaultDir + "/azuredeploy.json"); !os.IsNotExist(err) { + t.Fatalf("expected file %s/azuredeploy.json not to be generated by WriteTLSArtifacts with parametersOnly set to true", defaultDir) + } + + expectedFiles = []string{"azuredeploy.parameters.json", "ca.crt", "ca.key", "apiserver.crt", "apiserver.key", "client.crt", "client.key", "etcdclient.key", "etcdclient.crt", "etcdserver.crt", "etcdserver.key", "etcdpeer0.crt", "etcdpeer0.key", "kubectlClient.crt", "kubectlClient.key"} + + for _, f := range expectedFiles { + if _, err := os.Stat(defaultDir + "/" + f); os.IsNotExist(err) { + t.Fatalf("expected file %s/%s to be generated by 
WriteTLSArtifacts", dir, f) + } + } + + kubeDir := path.Join(defaultDir, "kubeconfig") + if _, err := os.Stat(kubeDir + "/" + "kubeconfig.eastus.json"); os.IsNotExist(err) { + t.Fatalf("expected file %s/kubeconfig/kubeconfig.eastus.json to be generated by WriteTLSArtifacts", defaultDir) + } + os.RemoveAll(defaultDir) + + // Generate certs with all kubeconfig locations + cs.Location = "" + err = writer.WriteTLSArtifacts(cs, "vlabs", "fake template", "fake parameters", "", true, false) + if err != nil { + t.Fatalf("unexpected error trying to write TLS artifacts: %s", err.Error()) + } + + for _, region := range AzureLocations { + if _, err := os.Stat(kubeDir + "/" + "kubeconfig." + region + ".json"); os.IsNotExist(err) { + t.Fatalf("expected kubeconfig for region %s to be generated by WriteTLSArtifacts", region) + } + } +} diff --git a/pkg/acsengine/params_k8s.go b/pkg/acsengine/params_k8s.go index 1ca849050d..2dfdae469f 100644 --- a/pkg/acsengine/params_k8s.go +++ b/pkg/acsengine/params_k8s.go @@ -124,13 +124,13 @@ func assignKubernetesParameters(properties *api.Properties, parametersMap params addValue(parametersMap, "kubernetesMetricsServerSpec", cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase+KubeConfigs[k8sVersion][DefaultMetricsServerAddonName]) } } - nvidiaDevicePluginAddon := getAddonByName(properties.OrchestratorProfile.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) - c = getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, DefaultNVIDIADevicePluginAddonName) + nvidiaDevicePluginAddon := getAddonByName(properties.OrchestratorProfile.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) + c = getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, NVIDIADevicePluginAddonName) if c > -1 { if nvidiaDevicePluginAddon.Containers[c].Image != "" { addValue(parametersMap, "kubernetesNVIDIADevicePluginSpec", nvidiaDevicePluginAddon.Containers[c].Image) } else { - addValue(parametersMap, "kubernetesNVIDIADevicePluginSpec", 
cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase+KubeConfigs[k8sVersion][DefaultNVIDIADevicePluginAddonName]) + addValue(parametersMap, "kubernetesNVIDIADevicePluginSpec", cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase+KubeConfigs[k8sVersion][NVIDIADevicePluginAddonName]) } } containerMonitoringAddon := getAddonByName(properties.OrchestratorProfile.KubernetesConfig.Addons, ContainerMonitoringAddonName) diff --git a/pkg/acsengine/pki.go b/pkg/acsengine/pki.go index 83883d7e5c..a650cb4204 100644 --- a/pkg/acsengine/pki.go +++ b/pkg/acsengine/pki.go @@ -13,6 +13,8 @@ import ( "net" "time" + "golang.org/x/sync/errgroup" + log "github.com/sirupsen/logrus" ) @@ -58,7 +60,7 @@ func CreatePki(extraFQDNs []string, extraIPs []net.IP, clusterDomain string, caP etcdClientPrivateKey *rsa.PrivateKey etcdPeerCertPairs []*PkiKeyCertPair ) - errors := make(chan error) + var group errgroup.Group var err error caCertificate, err = pemToCertificate(caPair.CertificatePem) @@ -70,62 +72,53 @@ func CreatePki(extraFQDNs []string, extraIPs []net.IP, clusterDomain string, caP return nil, nil, nil, nil, nil, nil, err } - go func() { - var err error + group.Go(func() (err error) { apiServerCertificate, apiServerPrivateKey, err = createCertificate("apiserver", caCertificate, caPrivateKey, false, true, extraFQDNs, extraIPs, nil) - errors <- err - }() + return err + }) - go func() { - var err error + group.Go(func() (err error) { organization := make([]string, 1) organization[0] = "system:masters" clientCertificate, clientPrivateKey, err = createCertificate("client", caCertificate, caPrivateKey, false, false, nil, nil, organization) - errors <- err - }() + return err + }) - go func() { - var err error + group.Go(func() (err error) { organization := make([]string, 1) organization[0] = "system:masters" kubeConfigCertificate, kubeConfigPrivateKey, err = createCertificate("client", caCertificate, caPrivateKey, false, false, nil, nil, organization) - errors <- err - }() + return err 
+ }) - go func() { - var err error + group.Go(func() (err error) { ip := net.ParseIP("127.0.0.1").To4() peerIPs := append(extraIPs, ip) etcdServerCertificate, etcdServerPrivateKey, err = createCertificate("etcdserver", caCertificate, caPrivateKey, true, true, nil, peerIPs, nil) - errors <- err - }() + return err + }) - go func() { - var err error + group.Go(func() (err error) { ip := net.ParseIP("127.0.0.1").To4() peerIPs := append(extraIPs, ip) etcdClientCertificate, etcdClientPrivateKey, err = createCertificate("etcdclient", caCertificate, caPrivateKey, true, false, nil, peerIPs, nil) - errors <- err - }() + return err + }) etcdPeerCertPairs = make([]*PkiKeyCertPair, masterCount) for i := 0; i < masterCount; i++ { - go func(i int) { - var err error + i := i + group.Go(func() (err error) { ip := net.ParseIP("127.0.0.1").To4() peerIPs := append(extraIPs, ip) etcdPeerCertificate, etcdPeerPrivateKey, err := createCertificate("etcdpeer", caCertificate, caPrivateKey, true, false, nil, peerIPs, nil) etcdPeerCertPairs[i] = &PkiKeyCertPair{CertificatePem: string(certificateToPem(etcdPeerCertificate.Raw)), PrivateKeyPem: string(privateKeyToPem(etcdPeerPrivateKey))} - errors <- err - }(i) + return err + }) } - e := make([]error, (masterCount + 5)) - for i := 0; i < len(e); i++ { - e[i] = <-errors - if e[i] != nil { - return nil, nil, nil, nil, nil, nil, e[i] - } + if err := group.Wait(); err != nil { + return nil, nil, nil, nil, nil, nil, err } return &PkiKeyCertPair{CertificatePem: string(certificateToPem(apiServerCertificate.Raw)), PrivateKeyPem: string(privateKeyToPem(apiServerPrivateKey))}, diff --git a/pkg/acsengine/ssh_test.go b/pkg/acsengine/ssh_test.go index fe4f82f96a..cb963336c5 100644 --- a/pkg/acsengine/ssh_test.go +++ b/pkg/acsengine/ssh_test.go @@ -1,87 +1,29 @@ package acsengine import ( - "math/rand" + "os" "testing" - "github.com/Azure/acs-engine/pkg/helpers" - "github.com/Azure/acs-engine/pkg/i18n" ) -func TestCreateSSH(t *testing.T) { - rg 
:= rand.New(rand.NewSource(42)) - - expectedPublicKeyString := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCyx5MHXjJvJAx5DJ9FZNIDa/QTWorSF+Ra21Tz49DQWfdSESnCGFFVBh/MQUFGv5kCenbmqEjsWF177kFOdv1vOTz4sKRlHg7u3I9uCyyZQrWx4X4RdNk7eX+isQVjFXYw2W1rRDUrnK/82qVTv1f0gu1DV4Z7GoIa2jfJ0zBUY3IW0VN9jYaPVuwv4t5y2GwSZF+HBRuOfLfiUgt4+qVFOz4KwRaEBsVfWxlidlT3K3/+ztWpFOmaKIOjQreEWV10ZSo3f9g6j/HdMPtwYvRCtYStbFCRmcbPr9nuR84SAX/4f95KvBAKLnXwb5Bt71D2vAlZSW1Ylv2VbcaZ73+43EpyphYCSg3kOCdwsqE/EU+Swued82SguLALD3mNKbxHGJppFjz3GMyPpJuSH5EE1OANyPxABCwCYycKiNWbOPi3l6o4tMrASYRXi8l3l9JCvioUJ3bXXH6cDpcP4P6QgsuxhwVkUiECU+dbjJXK4gAUVuWKkMOdY7ITh82oU3wOWXbk8K3bdIUp2ylcHeAd2pekGMuaEKGbrXGRiBitCEjl67Bj5opQflgSmI63g8Sa3mKOPGRYMI5MXHMVj4Rns5JFHoENuImrlvrbLv3izAwO61vgN7iK26BwzO7jz92fNOHGviejNWYJyi4vZlq07153NZXP8D2xYTebh9hwHQ==\n" - - expectedPrivateKeyString := `-----BEGIN RSA PRIVATE KEY----- -MIIJKgIBAAKCAgEAsseTB14ybyQMeQyfRWTSA2v0E1qK0hfkWttU8+PQ0Fn3UhEp -whhRVQYfzEFBRr+ZAnp25qhI7Fhde+5BTnb9bzk8+LCkZR4O7tyPbgssmUK1seF+ -EXTZO3l/orEFYxV2MNlta0Q1K5yv/NqlU79X9ILtQ1eGexqCGto3ydMwVGNyFtFT -fY2Gj1bsL+LecthsEmRfhwUbjny34lILePqlRTs+CsEWhAbFX1sZYnZU9yt//s7V -qRTpmiiDo0K3hFlddGUqN3/YOo/x3TD7cGL0QrWErWxQkZnGz6/Z7kfOEgF/+H/e -SrwQCi518G+Qbe9Q9rwJWUltWJb9lW3Gme9/uNxKcqYWAkoN5DgncLKhPxFPksLn -nfNkoLiwCw95jSm8RxiaaRY89xjMj6Sbkh+RBNTgDcj8QAQsAmMnCojVmzj4t5eq -OLTKwEmEV4vJd5fSQr4qFCd211x+nA6XD+D+kILLsYcFZFIhAlPnW4yVyuIAFFbl -ipDDnWOyE4fNqFN8Dll25PCt23SFKdspXB3gHdqXpBjLmhChm61xkYgYrQhI5euw -Y+aKUH5YEpiOt4PEmt5ijjxkWDCOTFxzFY+EZ7OSRR6BDbiJq5b62y794swMDutb -4De4itugcMzu48/dnzThxr4nozVmCcouL2ZatO9edzWVz/A9sWE3m4fYcB0CAwEA -AQKCAgEArQmNvWvm1LvHdsJIxhm3S6iJLNJN2ttVIrt3ljfCPGdXgg8qo7p1vh2X -WVMvoxJ/Pm7Z9pabPmao1PLeMtvooGZ+JRaTh2t4eKjyCki2egCfa/Qc2TiHqZEH -gKhl1mlHZDCOP2xdKkEV9V6K9mwU7YxrqOpmN3CIzQS5SpcmCAfYvU0Nyk/ZFZPE -NvUW6YGf2I1eCIlhCqCcOmm+wPGYVVHp0u7gpBkJoCnEgBCYXEO2NyJqmqSrFZJx -FuvURD1avvXLzrvmxYfdSYHHXBfq40ZdjJ1xvftg+lPyUzcctUDOY+8fcKZlv/UI -IhdZa45ehvGo+sqfE0fRWXhO6V9t9hdHwOq6ZEF2TtaA9qwPpZxiN5BN7G6Vi6Bm 
-u3HhSCHyEIdySi9/hX3fhDrhPN08NULLhpiKuSiFQesmUxFxWAprMpEyCdx0wva7 -5tZTQQfmVHCoWyVXWNMGTGBA/h8SWquoQWWhpG7UWCt0A0e0kcbegZTQPddxgITe -uqf6GadbajAr6Qwicf5yNH7bVPiD8dGWU07W3t4C0JyLGNLN34aT0OpleSck4dGp -V2UYylQNkf/EmxTY/CCPtNVVKng3CJ+jZvS4MOKvTi+vvsccd8x6BEo9xKetJhAA -SQeNDMu9tEPlZNHC972YNLb+LPm+feqgM2W/qcONtNhPw1INW+ECggEBAOmPO9jz -q6Gm8nNoALteuAD58pJ/suJTfhXbkGBOCG+hazlmk3rGzf9G/cK2jbS3ePoHw7b9 -oJcpoF2L1nUCdwxTJMUS+iyfVRQ4L8lRDC95x3vdBcdgFZUQgEx1L6hKuK5BpZOY -fyvIEmwpW7OpCOEqXeMOq3agR4//uptIyNCzyIPJz43H0dh6m4l+fYy53AOvDAeW -Xk0wERP6bolngkVnz1XbE43UNZqTFkGMF4gjJCbZ+UguOltsZXSPLA+ruRy3oYGn -LVo1ntAf8Ih94F43Y8Doe+VX3y2UJUqQa/ZFG2nu6KeuDWhwRS/XZQSkxrJ0bO2w -6eOCOEqggO7Qz7sCggEBAMP08Q1nPfmwdawEYWqopKeAMh00oMoX14u8UDmYejiH -uBegwzqgmOLfajFMJDnNXTyzxIRIndzrvXzvtFpSHkh29sOXXG9xlGyLWZGcxtzW -ivyTMw/pTg3yjN0qsleRB/o89VOYP2OG+1XjEcie6LNxXUN/wG5gUx8Wumb2c1hW -XBDM6cRbiSuJuINjscUgiHXKQddfu1cVRaNUgP1PGniKydCqdI2rUUQhziTmmj+o -q+dSv6nGRaK3uNhJrhpMlljxy1Mcr9zLP5FM1GjaF+VQ3zHNxDDbXl13rQPpDocw -vu9tAS/J1+vTgKzcHjKnudUWmoNahT3f4/86fc6XJgcCggEBAMK4ry3Goa5JUNPU -vt94LbJqsMlg+9PjxjgU8T7JcBEZpBqcIZL4EqClIEXpCyXC3XKfbJWwyOWeR9wW -DPtKzdQRsZM4qijvwe/0lCqkjqM6RY1IDVxXCEdaFY0pGk2V1nk5tADk4AmxaWKR -7KlR4VxQhSwbe+qP4Hn2vC5gtUQCz8bIR2muUY7JUcmFEslz3zGXDFF7FS4HSAW/ -Ac8+5AZXcS3kU14osXQo8yI82RWgLrDRhBqgp/i227Mc9qAuDEwb8OP2bEJMeBaO -umwhfiEuztTzPvBLnX8Thy+uTsRog12DWKcL3pPXHmevjcIcWqhHltVobOdIFwRo -4nW406cCggEBALmwZ6hy2Ai/DZL3B7VBn93WHicM0v0OwMN6rG8XrWHaQjmprrbk -rlv2qDOU2pMnpx25oBRWl7lcbtBweXBJdsbmbIoF6aL1d1ewaS0R6mQkrcoQVwfR -5pRS7uc56YwPNAcOMs+HazIOHCdUKGr7IrnASEeJTLmLb9j6+aJOEhl4pH+LHk5j -C0YFmKJxG2kYnhc4lVHZNrabwsS2dBEWH5hwtDOXAyGoYTb17dmL6ElAtb1b7aGc -8Cn0fSYAFAp53tLkNe9JNOE+fLtcmb/OQ2ybSRVxzmMZzX82w+37sDetmpFZsxEs -7P5dCwdDAx6vT+q8I6krYy2x9uTJ8aOOGYsCggEAAW9qf3UNuY0IB9kmHF3Oo1gN -s82h0OLpjJkW+5YYC0vYQit4AYNjXw+T+Z6WKOHOG3LIuQVC6Qj4c1+oN6sJi7re -Ey6Zq7/uWmYUpi9C8CbX1clJwany0V2PjGKL94gCIl7vaXS/4ouzzfl8qbF7FjQ4 -Qq/HPWSIC9Z8rKtUDDHeZYaLqvdhqbas/drqCXmeLeYM6Om4lQJdP+zip3Ctulp1 
-EPDesL0rH+3s1CKpgkhYdbJ675GFoGoq+X21QaqsdvoXmmuJF9qq9Tq+JaWloUNq -2FWXLhSX02saIdbIheS1fv/LqekXZd8eFXUj7VZ15tPG3SJqORS0pMtxSAJvLw== ------END RSA PRIVATE KEY----- -` - +func TestCreateSaveSSH(t *testing.T) { translator := &i18n.Translator{ Locale: nil, } + username := "test_user" + outputDirectory := "unit_tests" + expectedFile := outputDirectory + "/" + username + "_rsa" - privateKey, publicKey, err := helpers.CreateSSH(rg, translator) - if err != nil { - t.Fatalf("failed to generate SSH: %s", err) - } - privateKeyString := string(privateKeyToPem(privateKey)) + defer os.Remove(expectedFile) - if privateKeyString != expectedPrivateKeyString { - t.Fatalf("Private Key did not match expected format/value") + _, _, err := CreateSaveSSH(username, outputDirectory, translator) + + if err != nil { + t.Fatalf("Unexpected error creating and saving ssh key: %s", err) } - if publicKey != expectedPublicKeyString { - t.Fatalf("Public Key did not match expected format/value") + if _, err := os.Stat(expectedFile); os.IsNotExist(err) { + t.Fatalf("ssh file was not created") } } diff --git a/pkg/acsengine/template_generator.go b/pkg/acsengine/template_generator.go index 788d6866da..95cb5d2403 100644 --- a/pkg/acsengine/template_generator.go +++ b/pkg/acsengine/template_generator.go @@ -3,7 +3,6 @@ package acsengine import ( "bytes" "encoding/base64" - "errors" "fmt" "runtime/debug" "sort" @@ -15,6 +14,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/common" "github.com/Azure/acs-engine/pkg/helpers" "github.com/Azure/acs-engine/pkg/i18n" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -80,7 +80,7 @@ func (t *TemplateGenerator) GenerateTemplate(containerService *api.ContainerServ defer func() { if r := recover(); r != nil { s := debug.Stack() - err = fmt.Errorf("%v - %s", r, s) + err = errors.Errorf("%v - %s", r, s) // invalidate the template and the parameters templateRaw = "" @@ -89,7 +89,7 @@ func (t *TemplateGenerator) 
GenerateTemplate(containerService *api.ContainerServ }() if !validateDistro(containerService) { - return templateRaw, parametersRaw, certsGenerated, fmt.Errorf("Invalid distro") + return templateRaw, parametersRaw, certsGenerated, errors.New("Invalid distro") } var b bytes.Buffer @@ -188,6 +188,10 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat storagetier, _ := getStorageAccountType(profile.VMSize) buf.WriteString(fmt.Sprintf(",storageprofile=managed,storagetier=%s", storagetier)) } + if isNSeriesSKU(profile) { + accelerator := "nvidia" + buf.WriteString(fmt.Sprintf(",accelerator=%s", accelerator)) + } buf.WriteString(fmt.Sprintf(",kubernetes.azure.com/cluster=%s", rg)) for k, v := range profile.CustomNodeLabels { buf.WriteString(fmt.Sprintf(",%s=%s", k, v)) @@ -598,6 +602,11 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat if e != nil { return "" } + preprovisionCmd := "" + if profile.PreprovisionExtension != nil { + preprovisionCmd = makeAgentExtensionScriptCommands(cs, profile) + } + str = strings.Replace(str, "PREPROVISION_EXTENSION", escapeSingleLine(strings.TrimSpace(preprovisionCmd)), -1) return fmt.Sprintf("\"customData\": \"[base64(concat('%s'))]\",", str) }, "GetMasterSwarmModeCustomData": func() string { @@ -747,8 +756,8 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat rC := getAddonContainersIndexByName(reschedulerAddon.Containers, DefaultReschedulerAddonName) metricsServerAddon := getAddonByName(cs.Properties.OrchestratorProfile.KubernetesConfig.Addons, DefaultMetricsServerAddonName) mC := getAddonContainersIndexByName(metricsServerAddon.Containers, DefaultMetricsServerAddonName) - nvidiaDevicePluginAddon := getAddonByName(cs.Properties.OrchestratorProfile.KubernetesConfig.Addons, DefaultNVIDIADevicePluginAddonName) - nC := getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, DefaultNVIDIADevicePluginAddonName) + 
nvidiaDevicePluginAddon := getAddonByName(cs.Properties.OrchestratorProfile.KubernetesConfig.Addons, NVIDIADevicePluginAddonName) + nC := getAddonContainersIndexByName(nvidiaDevicePluginAddon.Containers, NVIDIADevicePluginAddonName) switch attr { case "kubernetesHyperkubeSpec": val = cs.Properties.OrchestratorProfile.KubernetesConfig.KubernetesImageBase + KubeConfigs[k8sVersion]["hyperkube"] @@ -954,7 +963,7 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat val = nvidiaDevicePluginAddon.Containers[nC].Image } } else { - val = cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase + KubeConfigs[k8sVersion][DefaultNVIDIADevicePluginAddonName] + val = cloudSpecConfig.KubernetesSpecConfig.NVIDIAImageBase + KubeConfigs[k8sVersion][NVIDIADevicePluginAddonName] } case "kubernetesReschedulerSpec": if rC > -1 { @@ -1077,7 +1086,12 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat "OpenShiftGetMasterSh": func() (string, error) { masterShAsset := getOpenshiftMasterShAsset(cs.Properties.OrchestratorProfile.OrchestratorVersion) tb := MustAsset(masterShAsset) - t, err := template.New("master").Parse(string(tb)) + t, err := template.New("master").Funcs(template.FuncMap{ + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, + }).Parse(string(tb)) if err != nil { return "", err } @@ -1100,7 +1114,12 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat "OpenShiftGetNodeSh": func(profile *api.AgentPoolProfile) (string, error) { nodeShAsset := getOpenshiftNodeShAsset(cs.Properties.OrchestratorProfile.OrchestratorVersion) tb := MustAsset(nodeShAsset) - t, err := template.New("node").Parse(string(tb)) + t, err := template.New("node").Funcs(template.FuncMap{ + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, + }).Parse(string(tb)) if err != nil 
{ return "", err } @@ -1142,5 +1161,9 @@ func (t *TemplateGenerator) getTemplateFuncMap(cs *api.ContainerService) templat "IsCustomVNET": func() bool { return isCustomVNET(cs.Properties.AgentPoolProfiles) }, + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, } } diff --git a/pkg/acsengine/tenantid.go b/pkg/acsengine/tenantid.go index 80fc1a64c2..15e0e26939 100644 --- a/pkg/acsengine/tenantid.go +++ b/pkg/acsengine/tenantid.go @@ -1,12 +1,12 @@ package acsengine import ( - "fmt" "net/http" "regexp" "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" "github.com/Azure/go-autorest/autorest/azure" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -25,19 +25,16 @@ func GetTenantID(env azure.Environment, subscriptionID string) (string, error) { // network error etc) subs, err := c.Get(subscriptionID) if subs.Response.Response == nil { - log.Errorf("Request failed: %v", err) - return "", fmt.Errorf("Request failed: %v", err) + return "", errors.Wrap(err, "Request failed") } // Expecting 401 StatusUnauthorized here, just read the header if subs.StatusCode != http.StatusUnauthorized { - log.Errorf("Unexpected response from Get Subscription: %v", subs.StatusCode) - return "", fmt.Errorf("Unexpected response from Get Subscription: %v", subs.StatusCode) + return "", errors.Errorf("Unexpected response from Get Subscription: %v", subs.StatusCode) } hdr := subs.Header.Get(hdrKey) if hdr == "" { - log.Errorf("Header %v not found in Get Subscription response", hdrKey) - return "", fmt.Errorf("Header %v not found in Get Subscription response", hdrKey) + return "", errors.Errorf("Header %v not found in Get Subscription response", hdrKey) } // Example value for hdr: @@ -45,8 +42,7 @@ func GetTenantID(env azure.Environment, subscriptionID string) (string, error) { r := regexp.MustCompile(`authorization_uri=".*/([0-9a-f\-]+)"`) m := 
r.FindStringSubmatch(hdr) if m == nil { - log.Errorf("Could not find the tenant ID in header: %s %q", hdrKey, hdr) - return "", fmt.Errorf("Could not find the tenant ID in header: %s %q", hdrKey, hdr) + return "", errors.Errorf("Could not find the tenant ID in header: %s %q", hdrKey, hdr) } return m[1], nil } diff --git a/pkg/api/agentPoolOnlyApi/vlabs/types_test.go b/pkg/api/agentPoolOnlyApi/vlabs/types_test.go index 17b5cf1fbc..0cedb47129 100644 --- a/pkg/api/agentPoolOnlyApi/vlabs/types_test.go +++ b/pkg/api/agentPoolOnlyApi/vlabs/types_test.go @@ -7,7 +7,7 @@ import ( func TestAgentPoolProfile(t *testing.T) { // With osType not specified - AgentPoolProfileText := "{\"count\" : 0}" + AgentPoolProfileText := `{ "name": "linuxpool1", "count": 0, "vmSize": "Standard_D2_v2", "availabilityProfile": "AvailabilitySet" }` ap := &AgentPoolProfile{} if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) @@ -17,11 +17,35 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux { + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } if !ap.IsStorageAccount() { t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != StorageAccount after unmarshal") } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly 
detected AgentPoolProfile.OSType != Windows after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + if !ap.IsCustomVNET() { + t.Fatalf("unexpectedly detected empty AgentPoolProfile.VNetSubnetID after unmarshal") + } } diff --git a/pkg/api/apiloader.go b/pkg/api/apiloader.go index 9745b22267..8706492391 100644 --- a/pkg/api/apiloader.go +++ b/pkg/api/apiloader.go @@ -6,8 +6,6 @@ import ( "io/ioutil" "reflect" - "fmt" - "github.com/Azure/acs-engine/pkg/api/agentPoolOnlyApi/v20170831" "github.com/Azure/acs-engine/pkg/api/agentPoolOnlyApi/v20180331" apvlabs "github.com/Azure/acs-engine/pkg/api/agentPoolOnlyApi/vlabs" @@ -19,6 +17,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/vlabs" "github.com/Azure/acs-engine/pkg/helpers" "github.com/Azure/acs-engine/pkg/i18n" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -85,7 +84,7 @@ func (a *Apiloader) LoadContainerService( } setContainerServiceDefaultsv20160930(containerService) if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(); validate && e != nil { return nil, e @@ -108,7 +107,7 @@ func (a *Apiloader) LoadContainerService( } setContainerServiceDefaultsv20160330(containerService) if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(); validate && e != nil { return nil, e @@ -132,7 +131,7 @@ func (a *Apiloader) LoadContainerService( } setContainerServiceDefaultsv20170131(containerService) if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, 
errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(); validate && e != nil { return nil, e @@ -155,7 +154,7 @@ func (a *Apiloader) LoadContainerService( } } if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(isUpdate); validate && e != nil { return nil, e @@ -183,7 +182,7 @@ func (a *Apiloader) LoadContainerService( } } if containerService.Properties == nil { - return nil, fmt.Errorf("missing ContainerService Properties") + return nil, errors.New("missing ContainerService Properties") } if e := containerService.Properties.Validate(isUpdate); validate && e != nil { return nil, e diff --git a/pkg/api/common/helper.go b/pkg/api/common/helper.go index 0c1182a75c..924d76e71c 100644 --- a/pkg/api/common/helper.go +++ b/pkg/api/common/helper.go @@ -1,10 +1,10 @@ package common import ( - "fmt" "regexp" "strings" + "github.com/pkg/errors" validator "gopkg.in/go-playground/validator.v9" ) @@ -19,38 +19,38 @@ func HandleValidationErrors(e validator.ValidationErrors) error { "Properties.LinuxProfile", "Properties.ServicePrincipalProfile.ClientID", "Properties.WindowsProfile.AdminUsername", "Properties.WindowsProfile.AdminPassword": - return fmt.Errorf("missing %s", ns) + return errors.Errorf("missing %s", ns) case "Properties.MasterProfile.Count": - return fmt.Errorf("MasterProfile count needs to be 1, 3, or 5") + return errors.New("MasterProfile count needs to be 1, 3, or 5") case "Properties.MasterProfile.OSDiskSizeGB": - return fmt.Errorf("Invalid os disk size of %d specified. The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) + return errors.Errorf("Invalid os disk size of %d specified. 
The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) case "Properties.MasterProfile.IPAddressCount": - return fmt.Errorf("MasterProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) + return errors.Errorf("MasterProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) case "Properties.MasterProfile.StorageProfile": - return fmt.Errorf("Unknown storageProfile '%s'. Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) + return errors.Errorf("Unknown storageProfile '%s'. Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) default: if strings.HasPrefix(ns, "Properties.AgentPoolProfiles") { switch { case strings.HasSuffix(ns, ".Name") || strings.HasSuffix(ns, "VMSize"): - return fmt.Errorf("missing %s", ns) + return errors.Errorf("missing %s", ns) case strings.HasSuffix(ns, ".Count"): - return fmt.Errorf("AgentPoolProfile count needs to be in the range [%d,%d]", MinAgentCount, MaxAgentCount) + return errors.Errorf("AgentPoolProfile count needs to be in the range [%d,%d]", MinAgentCount, MaxAgentCount) case strings.HasSuffix(ns, ".OSDiskSizeGB"): - return fmt.Errorf("Invalid os disk size of %d specified. The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) + return errors.Errorf("Invalid os disk size of %d specified. The range of valid values are [%d, %d]", err.Value().(int), MinDiskSizeGB, MaxDiskSizeGB) case strings.Contains(ns, ".Ports"): - return fmt.Errorf("AgentPoolProfile Ports must be in the range[%d, %d]", MinPort, MaxPort) + return errors.Errorf("AgentPoolProfile Ports must be in the range[%d, %d]", MinPort, MaxPort) case strings.HasSuffix(ns, ".StorageProfile"): - return fmt.Errorf("Unknown storageProfile '%s'. Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) + return errors.Errorf("Unknown storageProfile '%s'. 
Specify either %s or %s", err.Value().(string), StorageAccount, ManagedDisks) case strings.Contains(ns, ".DiskSizesGB"): - return fmt.Errorf("A maximum of %d disks may be specified, The range of valid disk size values are [%d, %d]", MaxDisks, MinDiskSizeGB, MaxDiskSizeGB) + return errors.Errorf("A maximum of %d disks may be specified, The range of valid disk size values are [%d, %d]", MaxDisks, MinDiskSizeGB, MaxDiskSizeGB) case strings.HasSuffix(ns, ".IPAddressCount"): - return fmt.Errorf("AgentPoolProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) + return errors.Errorf("AgentPoolProfile.IPAddressCount needs to be in the range [%d,%d]", MinIPAddressCount, MaxIPAddressCount) default: break } } } - return fmt.Errorf("Namespace %s is not caught, %+v", ns, e) + return errors.Errorf("Namespace %s is not caught, %+v", ns, e) } // ValidateDNSPrefix is a helper function to check that a DNS Prefix is valid @@ -61,7 +61,7 @@ func ValidateDNSPrefix(dnsName string) error { return err } if !re.MatchString(dnsName) { - return fmt.Errorf("DNSPrefix '%s' is invalid. The DNSPrefix must contain between 3 and 45 characters and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. (length was %d)", dnsName, len(dnsName)) + return errors.Errorf("DNSPrefix '%s' is invalid. The DNSPrefix must contain between 3 and 45 characters and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. (length was %d)", dnsName, len(dnsName)) } return nil } diff --git a/pkg/api/common/net.go b/pkg/api/common/net.go index 728d4b5dde..0766c9bb67 100644 --- a/pkg/api/common/net.go +++ b/pkg/api/common/net.go @@ -1,9 +1,10 @@ package common import ( - "fmt" "net" "regexp" + + "github.com/pkg/errors" ) // CidrFirstIP returns the first IP of the provided subnet. 
@@ -51,7 +52,7 @@ func GetVNETSubnetIDComponents(vnetSubnetID string) (string, string, string, str } submatches := re.FindStringSubmatch(vnetSubnetID) if len(submatches) != 5 { - return "", "", "", "", fmt.Errorf("Unable to parse vnetSubnetID. Please use a vnetSubnetID with format /subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME") + return "", "", "", "", errors.New("Unable to parse vnetSubnetID. Please use a vnetSubnetID with format /subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME") } return submatches[1], submatches[2], submatches[3], submatches[4], nil } diff --git a/pkg/api/common/versions.go b/pkg/api/common/versions.go index 224d221018..c41cf7be89 100644 --- a/pkg/api/common/versions.go +++ b/pkg/api/common/versions.go @@ -50,6 +50,7 @@ var AllKubernetesSupportedVersions = map[string]bool{ "1.9.6": true, "1.9.7": true, "1.9.8": true, + "1.9.9": true, "1.10.0-beta.2": true, "1.10.0-beta.4": true, "1.10.0-rc.1": true, @@ -58,10 +59,15 @@ var AllKubernetesSupportedVersions = map[string]bool{ "1.10.2": true, "1.10.3": true, "1.10.4": true, + "1.10.5": true, "1.11.0-alpha.1": true, "1.11.0-alpha.2": true, "1.11.0-beta.1": true, "1.11.0-beta.2": true, + "1.11.0-rc.1": true, + "1.11.0-rc.2": true, + "1.11.0-rc.3": true, + "1.11.0": true, } // GetDefaultKubernetesVersion returns the default Kubernetes version, that is the latest patch of the default release diff --git a/pkg/api/common/versions_test.go b/pkg/api/common/versions_test.go index dc9e4f9e0f..15c08dfe4d 100644 --- a/pkg/api/common/versions_test.go +++ b/pkg/api/common/versions_test.go @@ -340,6 +340,11 @@ func Test_GetValidPatchVersion(t *testing.T) { t.Errorf("It is not the default Kubernetes version") } + v = GetValidPatchVersion(Mesos, "1.6.0", false) + if v != "" { + t.Errorf("Expected empty version for unsupported orchType") + } + for version, enabled := range 
AllKubernetesWindowsSupportedVersions { if enabled { v = GetValidPatchVersion(Kubernetes, version, true) @@ -411,6 +416,13 @@ func TestGetMaxVersion(t *testing.T) { t.Errorf("GetMaxVersion returned the wrong max version, expected %s, got %s", expected, max) } + expected = "1.1.2" + versions = []string{"1.1.1", "1.0.0-alpha.1", expected} + max = GetMaxVersion(versions, true) + if max != expected { + t.Errorf("GetMaxVersion returned the wrong max version, expected %s, got %s", expected, max) + } + expected = "" versions = []string{} max = GetMaxVersion(versions, false) diff --git a/pkg/api/const.go b/pkg/api/const.go index ca5b112475..cb4f3fe697 100644 --- a/pkg/api/const.go +++ b/pkg/api/const.go @@ -107,6 +107,8 @@ const ( DefaultNVIDIADevicePluginAddonEnabled = false // DefaultContainerMonitoringAddonEnabled determines the acs-engine provided default for enabling kubernetes container monitoring addon DefaultContainerMonitoringAddonEnabled = false + // DefaultAzureCNINetworkMonitoringAddonEnabled Azure CNI networkmonitor addon default + DefaultAzureCNINetworkMonitoringAddonEnabled = false // DefaultTillerAddonName is the name of the tiller addon deployment DefaultTillerAddonName = "tiller" // DefaultACIConnectorAddonName is the name of the tiller addon deployment @@ -119,18 +121,20 @@ const ( DefaultReschedulerAddonName = "rescheduler" // DefaultMetricsServerAddonName is the name of the kubernetes metrics server addon deployment DefaultMetricsServerAddonName = "metrics-server" - // DefaultNVIDIADevicePluginAddonName is the name of the NVIDIA device plugin addon deployment - DefaultNVIDIADevicePluginAddonName = "nvidia-device-plugin" + // NVIDIADevicePluginAddonName is the name of the NVIDIA device plugin addon deployment + NVIDIADevicePluginAddonName = "nvidia-device-plugin" // ContainerMonitoringAddonName is the name of the kubernetes Container Monitoring addon deployment ContainerMonitoringAddonName = "container-monitoring" // DefaultPrivateClusterEnabled 
determines the acs-engine provided default for enabling kubernetes Private Cluster DefaultPrivateClusterEnabled = false - // NetworkPolicyAzure is the string expression for the deprecated NetworkPolicy usage pattern "azure" + // NetworkPolicyAzure is the string expression for Azure CNI network policy manager NetworkPolicyAzure = "azure" // NetworkPolicyNone is the string expression for the deprecated NetworkPolicy usage pattern "none" NetworkPolicyNone = "none" // NetworkPluginKubenet is the string expression for the kubenet NetworkPlugin config NetworkPluginKubenet = "kubenet" + // NetworkPluginAzure is thee string expression for Azure CNI plugin. + NetworkPluginAzure = "azure" ) const ( diff --git a/pkg/api/convertertoapi.go b/pkg/api/convertertoapi.go index 1ec10d2392..66c1894308 100644 --- a/pkg/api/convertertoapi.go +++ b/pkg/api/convertertoapi.go @@ -718,7 +718,8 @@ func setVlabsKubernetesDefaults(vp *vlabs.Properties, api *OrchestratorProfile) if vp.OrchestratorProfile.KubernetesConfig != nil { // Included here for backwards compatibility with deprecated NetworkPolicy usage patterns - if vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure { + if vp.OrchestratorProfile.KubernetesConfig.NetworkPlugin == "" && + vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure { api.KubernetesConfig.NetworkPlugin = vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy api.KubernetesConfig.NetworkPolicy = "" // no-op but included for emphasis } else if vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyNone { diff --git a/pkg/api/orchestrators.go b/pkg/api/orchestrators.go index 8666a8d340..de54e39e8a 100644 --- a/pkg/api/orchestrators.go +++ b/pkg/api/orchestrators.go @@ -1,7 +1,6 @@ package api import ( - "fmt" "strconv" "strings" @@ -9,6 +8,7 @@ import ( "github.com/Azure/acs-engine/pkg/api/v20170930" "github.com/Azure/acs-engine/pkg/api/vlabs" "github.com/blang/semver" + 
"github.com/pkg/errors" ) type orchestratorsFunc func(*OrchestratorProfile) ([]*OrchestratorVersionProfile, error) @@ -47,10 +47,10 @@ func validate(orchestrator, version string) (string, error) { return OpenShift, nil case orchestrator == "": if version != "" { - return "", fmt.Errorf("Must specify orchestrator for version '%s'", version) + return "", errors.Errorf("Must specify orchestrator for version '%s'", version) } default: - return "", fmt.Errorf("Unsupported orchestrator '%s'", orchestrator) + return "", errors.Errorf("Unsupported orchestrator '%s'", orchestrator) } return "", nil } @@ -120,7 +120,7 @@ func getOrchestratorVersionProfileList(orchestrator, version string) ([]*Orchest // GetOrchestratorVersionProfile returns orchestrator info for upgradable container service func GetOrchestratorVersionProfile(orch *OrchestratorProfile) (*OrchestratorVersionProfile, error) { if orch.OrchestratorVersion == "" { - return nil, fmt.Errorf("Missing Orchestrator Version") + return nil, errors.New("Missing Orchestrator Version") } switch orch.OrchestratorType { case Kubernetes, DCOS: @@ -130,11 +130,11 @@ func GetOrchestratorVersionProfile(orch *OrchestratorProfile) (*OrchestratorVers } // has to be exactly one element per specified orchestrator/version if len(arr) != 1 { - return nil, fmt.Errorf("Umbiguous Orchestrator Versions") + return nil, errors.New("Umbiguous Orchestrator Versions") } return arr[0], nil default: - return nil, fmt.Errorf("Upgrade operation is not supported for '%s'", orch.OrchestratorType) + return nil, errors.Errorf("Upgrade operation is not supported for '%s'", orch.OrchestratorType) } } @@ -159,7 +159,7 @@ func kubernetesInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, } } else { if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("Kubernetes version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("Kubernetes version %s is not supported", csOrch.OrchestratorVersion) } upgrades, err 
:= kubernetesUpgrades(csOrch) @@ -218,7 +218,7 @@ func dcosInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, error } } else { if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("DCOS version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("DCOS version %s is not supported", csOrch.OrchestratorVersion) } // get info for the specified version @@ -265,7 +265,7 @@ func swarmInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, erro } if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("Swarm version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("Swarm version %s is not supported", csOrch.OrchestratorVersion) } return []*OrchestratorVersionProfile{ { @@ -291,7 +291,7 @@ func dockerceInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, e } if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("Docker CE version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("Docker CE version %s is not supported", csOrch.OrchestratorVersion) } return []*OrchestratorVersionProfile{ { @@ -323,7 +323,7 @@ func openShiftInfo(csOrch *OrchestratorProfile) ([]*OrchestratorVersionProfile, } } else { if !isVersionSupported(csOrch) { - return nil, fmt.Errorf("OpenShift version %s is not supported", csOrch.OrchestratorVersion) + return nil, errors.Errorf("OpenShift version %s is not supported", csOrch.OrchestratorVersion) } // TODO: populate OrchestratorVersionProfile.Upgrades diff --git a/pkg/api/strictjson.go b/pkg/api/strictjson.go index ee00396045..a88addcb37 100644 --- a/pkg/api/strictjson.go +++ b/pkg/api/strictjson.go @@ -2,9 +2,10 @@ package api import ( "encoding/json" - "fmt" "reflect" "strings" + + "github.com/pkg/errors" ) func checkJSONKeys(data []byte, types ...reflect.Type) error { @@ -21,7 +22,7 @@ func checkMapKeys(o map[string]interface{}, types ...reflect.Type) error { for k, v := range o { f, present := 
fieldMap[strings.ToLower(k)] if !present { - return fmt.Errorf("Unknown JSON tag %s", k) + return errors.Errorf("Unknown JSON tag %s", k) } if f.Type.Kind() == reflect.Struct && v != nil { if childMap, exists := v.(map[string]interface{}); exists { diff --git a/pkg/api/types.go b/pkg/api/types.go index 5fc5793b40..f443dc2a9b 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -943,7 +943,7 @@ func (p *Properties) IsNVIDIADevicePluginEnabled() bool { k := p.OrchestratorProfile.KubernetesConfig o := p.OrchestratorProfile for i := range k.Addons { - if k.Addons[i].Name == DefaultNVIDIADevicePluginAddonName { + if k.Addons[i].Name == NVIDIADevicePluginAddonName { nvidiaDevicePluginAddon = k.Addons[i] } } @@ -978,3 +978,9 @@ func (k *KubernetesConfig) PrivateJumpboxProvision() bool { } return false } + +// RequiresDocker returns if the kubernetes settings require docker to be installed. +func (k *KubernetesConfig) RequiresDocker() bool { + runtime := strings.ToLower(k.ContainerRuntime) + return runtime == "docker" || runtime == "" +} diff --git a/pkg/api/types_test.go b/pkg/api/types_test.go index 6a6d346587..5626976f0f 100644 --- a/pkg/api/types_test.go +++ b/pkg/api/types_test.go @@ -898,7 +898,7 @@ func TestIsNVIDIADevicePluginEnabled(t *testing.T) { p.AgentPoolProfiles[0].VMSize = "Standard_D2_v2" p.OrchestratorProfile.KubernetesConfig.Addons = []KubernetesAddon{ { - Name: DefaultNVIDIADevicePluginAddonName, + Name: NVIDIADevicePluginAddonName, Enabled: helpers.PointerToBool(false), }, } diff --git a/pkg/api/v20160330/types_test.go b/pkg/api/v20160330/types_test.go index 7dc5e65abd..87061feab1 100644 --- a/pkg/api/v20160330/types_test.go +++ b/pkg/api/v20160330/types_test.go @@ -1,6 +1,7 @@ package v20160330 import ( + "encoding/json" "testing" ) @@ -18,3 +19,70 @@ func TestIsDCOS(t *testing.T) { t.Fatalf("unexpectedly detected DCOS orchestrator profile from OrchestratorType=%s", kubernetesProfile.OrchestratorType) } } + +func TestAgentPoolProfile(t *testing.T) 
{ + // With osType not specified + AgentPoolProfileText := `{ "name": "linuxpool1", "count": 0, "vmSize": "Standard_D2_v2", "availabilityProfile": "AvailabilitySet" }` + ap := &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsLinux() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") + } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } +} + +func TestMasterProfile(t *testing.T) { + MasterProfileText := `{ "count": 0, "dnsPrefix": "", "vmSize": "Standard_D2_v2" }` + mp := &MasterProfile{} + if e := json.Unmarshal([]byte(MasterProfileText), mp); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for MasterProfile, %+v", e) + } +} + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), 
op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e == nil { + t.Fatalf("expected unmarshal failure for OrchestratorProfile") + } +} diff --git a/pkg/api/v20160930/types.go b/pkg/api/v20160930/types.go index cdaf13dc77..87b665dafa 100644 --- a/pkg/api/v20160930/types.go +++ b/pkg/api/v20160930/types.go @@ -176,7 +176,7 @@ func (a *AgentPoolProfile) UnmarshalJSON(b []byte) error { return nil } -// JumpboxProfile dscribes properties of the jumpbox setup +// JumpboxProfile describes properties of the jumpbox setup // in the ACS container cluster. 
type JumpboxProfile struct { OSType OSType `json:"osType,omitempty"` diff --git a/pkg/api/v20160930/types_test.go b/pkg/api/v20160930/types_test.go index 6c6b44d976..3ac851506a 100644 --- a/pkg/api/v20160930/types_test.go +++ b/pkg/api/v20160930/types_test.go @@ -44,7 +44,50 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux { + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } +} + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, 
%+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + + } } diff --git a/pkg/api/v20170131/types_test.go b/pkg/api/v20170131/types_test.go index 98df028cb6..8afb3509dc 100644 --- a/pkg/api/v20170131/types_test.go +++ b/pkg/api/v20170131/types_test.go @@ -44,7 +44,7 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux { + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } } diff --git a/pkg/api/v20170701/types_test.go b/pkg/api/v20170701/types_test.go index 525a84fe9a..df6b4d79db 100644 --- a/pkg/api/v20170701/types_test.go +++ b/pkg/api/v20170701/types_test.go @@ -23,7 +23,7 @@ func TestMasterProfile(t *testing.T) { func TestAgentPoolProfile(t *testing.T) { // With osType not specified - AgentPoolProfileText := "{\"count\" : 0}" + AgentPoolProfileText := `{"count" : 0, "storageProfile" : "StorageAccount"}` ap := &AgentPoolProfile{} if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) @@ -33,7 +33,69 @@ func TestAgentPoolProfile(t *testing.T) { t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") } - if ap.OSType != Linux { + if !ap.IsLinux() { t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") } + + if !ap.IsStorageAccount() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType specified + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", 
"vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } +} + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e == nil { + t.Fatalf("expected unmarshal failure for OrchestratorProfile when passing an invalid orchestratorType") + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "DockerCE" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + if !op.IsSwarmMode() { + t.Fatalf("unexpectedly detected OrchestratorProfile.Type != DockerCE after unmarshal") + + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly 
detected unmarshal failure for OrchestratorProfile, %+v", e) + + } } diff --git a/pkg/api/vlabs/types.go b/pkg/api/vlabs/types.go index 0ca4e39127..507e36d225 100644 --- a/pkg/api/vlabs/types.go +++ b/pkg/api/vlabs/types.go @@ -633,3 +633,9 @@ func (l *LinuxProfile) HasCustomNodesDNS() bool { func (o *OrchestratorProfile) IsSwarmMode() bool { return o.OrchestratorType == SwarmMode } + +// RequiresDocker returns if the kubernetes settings require docker to be installed. +func (k *KubernetesConfig) RequiresDocker() bool { + runtime := strings.ToLower(k.ContainerRuntime) + return runtime == "docker" || runtime == "" +} diff --git a/pkg/api/vlabs/types_test.go b/pkg/api/vlabs/types_test.go index 2147307fda..cea24440e4 100644 --- a/pkg/api/vlabs/types_test.go +++ b/pkg/api/vlabs/types_test.go @@ -1,6 +1,7 @@ package vlabs import ( + "encoding/json" "testing" ) @@ -41,3 +42,138 @@ func TestKubernetesAddon(t *testing.T) { t.Fatalf("KubernetesAddon.IsEnabled(true) should always return false when Enabled property is set to false") } } + +func TestOrchestratorProfile(t *testing.T) { + OrchestratorProfileText := `{ "orchestratorType": "Mesos" }` + op := &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e == nil { + t.Fatalf("expected unmarshal failure for OrchestratorProfile when passing an invalid orchestratorType") + } + + OrchestratorProfileText = `{ "orchestratorType": "Swarm" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "SwarmMode" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + if !op.IsSwarmMode() { + t.Fatalf("unexpectedly detected OrchestratorProfile.Type != DockerCE after 
unmarshal") + + } + + OrchestratorProfileText = `{ "orchestratorType": "DCOS" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + } + + OrchestratorProfileText = `{ "orchestratorType": "Kubernetes" }` + op = &OrchestratorProfile{} + if e := json.Unmarshal([]byte(OrchestratorProfileText), op); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for OrchestratorProfile, %+v", e) + + } +} + +func TestAgentPoolProfile(t *testing.T) { + // With osType not specified + AgentPoolProfileText := `{"count" : 0, "storageProfile" : "StorageAccount", "vnetSubnetID" : "1234"}` + ap := &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 0 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsCustomVNET() { + t.Fatalf("unexpectedly detected nil AgentPoolProfile.VNetSubNetID after unmarshal") + } + + if !ap.IsStorageAccount() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType Windows + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Windows", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsWindows() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Windows after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected 
AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType Linux and RHEL distro + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Linux", "distro" : "rhel", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "AvailabilitySet", "storageProfile" : "ManagedDisks", "vnetSubnetID" : "12345" }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsLinux() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") + } + + if !ap.IsRHEL() { + t.Fatalf("unexpectedly detected AgentPoolProfile.Distro != RHEL after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + // With osType Linux and coreos distro + AgentPoolProfileText = `{ "name": "linuxpool1", "osType" : "Linux", "distro" : "coreos", "count": 1, "vmSize": "Standard_D2_v2", +"availabilityProfile": "VirtualMachineScaleSets", "storageProfile" : "ManagedDisks", "diskSizesGB" : [750, 250, 600, 1000] }` + ap = &AgentPoolProfile{} + if e := json.Unmarshal([]byte(AgentPoolProfileText), ap); e != nil { + t.Fatalf("unexpectedly detected unmarshal failure for AgentPoolProfile, %+v", e) + } + + if ap.Count != 1 { + t.Fatalf("unexpectedly detected AgentPoolProfile.Count != 1 after unmarshal") + } + + if !ap.IsLinux() { + t.Fatalf("unexpectedly detected AgentPoolProfile.OSType != Linux after unmarshal") + } + + if !ap.IsCoreOS() { + t.Fatalf("unexpectedly detected AgentPoolProfile.Distro != CoreOS after unmarshal") + } + + if !ap.IsManagedDisks() { + t.Fatalf("unexpectedly detected AgentPoolProfile.StorageProfile != ManagedDisks after unmarshal") + } + + if !ap.HasDisks() { + t.Fatalf("unexpectedly 
detected AgentPoolProfile.DiskSizesGB < 0 after unmarshal") + } + + if !ap.IsVirtualMachineScaleSets() { + t.Fatalf("unexpectedly detected AgentPoolProfile.AvailabilitySets != VirtualMachineScaleSets after unmarshal") + } +} diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 0515cf3a74..229a7ec4e3 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "net/url" + "reflect" "regexp" "strings" "time" @@ -27,7 +28,7 @@ var ( "3.0.0", "3.0.1", "3.0.2", "3.0.3", "3.0.4", "3.0.5", "3.0.6", "3.0.7", "3.0.8", "3.0.9", "3.0.10", "3.0.11", "3.0.12", "3.0.13", "3.0.14", "3.0.15", "3.0.16", "3.0.17", "3.1.0", "3.1.1", "3.1.2", "3.1.2", "3.1.3", "3.1.4", "3.1.5", "3.1.6", "3.1.7", "3.1.8", "3.1.9", "3.1.10", "3.2.0", "3.2.1", "3.2.2", "3.2.3", "3.2.4", "3.2.5", "3.2.6", "3.2.7", "3.2.8", "3.2.9", "3.2.11", "3.2.12", - "3.2.13", "3.2.14", "3.2.15", "3.2.16", "3.3.0", "3.3.1"} + "3.2.13", "3.2.14", "3.2.15", "3.2.16", "3.2.23", "3.3.0", "3.3.1"} networkPluginPlusPolicyAllowed = []k8sNetworkConfig{ { networkPlugin: "", @@ -37,6 +38,10 @@ var ( networkPlugin: "azure", networkPolicy: "", }, + { + networkPlugin: "azure", + networkPolicy: "azure", + }, { networkPlugin: "kubenet", networkPolicy: "", @@ -377,10 +382,23 @@ func (a *Properties) validateAgentPoolProfiles() error { } } + if a.OrchestratorProfile.OrchestratorType == OpenShift { + if (agentPoolProfile.Name == "infra") != (agentPoolProfile.Role == "infra") { + return fmt.Errorf("OpenShift requires that the 'infra' agent pool profile, and no other, should have role 'infra'") + } + } + if e := agentPoolProfile.validateWindows(a.OrchestratorProfile, a.WindowsProfile); agentPoolProfile.OSType == Windows && e != nil { return e } } + + if a.OrchestratorProfile.OrchestratorType == OpenShift { + if !reflect.DeepEqual(profileNames, map[string]bool{"compute": true, "infra": true}) { + return fmt.Errorf("OpenShift requires exactly two agent pool profiles: compute 
and infra") + } + } + return nil } @@ -409,28 +427,30 @@ func (a *Properties) validateAddons() error { for _, addon := range a.OrchestratorProfile.KubernetesConfig.Addons { switch addon.Name { case "cluster-autoscaler": - if *addon.Enabled && isAvailabilitySets { + if helpers.IsTrueBoolPointer(addon.Enabled) && isAvailabilitySets { return fmt.Errorf("Cluster Autoscaler add-on can only be used with VirtualMachineScaleSets. Please specify \"availabilityProfile\": \"%s\"", VirtualMachineScaleSets) } case "nvidia-device-plugin": - version := common.RationalizeReleaseAndVersion( - a.OrchestratorProfile.OrchestratorType, - a.OrchestratorProfile.OrchestratorRelease, - a.OrchestratorProfile.OrchestratorVersion, - false) - if version == "" { - return fmt.Errorf("the following user supplied OrchestratorProfile configuration is not supported: OrchestratorType: %s, OrchestratorRelease: %s, OrchestratorVersion: %s. Please check supported Release or Version for this build of acs-engine", a.OrchestratorProfile.OrchestratorType, a.OrchestratorProfile.OrchestratorRelease, a.OrchestratorProfile.OrchestratorVersion) - } - sv, err := semver.Make(version) - if err != nil { - return fmt.Errorf("could not validate version %s", version) - } - minVersion, err := semver.Make("1.10.0") - if err != nil { - return fmt.Errorf("could not validate version") - } - if isNSeriesSKU && sv.LT(minVersion) { - return fmt.Errorf("NVIDIA Device Plugin add-on can only be used Kubernetes 1.10 or above. Please specify \"orchestratorRelease\": \"1.10\"") + if helpers.IsTrueBoolPointer(addon.Enabled) { + version := common.RationalizeReleaseAndVersion( + a.OrchestratorProfile.OrchestratorType, + a.OrchestratorProfile.OrchestratorRelease, + a.OrchestratorProfile.OrchestratorVersion, + false) + if version == "" { + return fmt.Errorf("the following user supplied OrchestratorProfile configuration is not supported: OrchestratorType: %s, OrchestratorRelease: %s, OrchestratorVersion: %s. 
Please check supported Release or Version for this build of acs-engine", a.OrchestratorProfile.OrchestratorType, a.OrchestratorProfile.OrchestratorRelease, a.OrchestratorProfile.OrchestratorVersion) + } + sv, err := semver.Make(version) + if err != nil { + return fmt.Errorf("could not validate version %s", version) + } + minVersion, err := semver.Make("1.10.0") + if err != nil { + return fmt.Errorf("could not validate version") + } + if isNSeriesSKU && sv.LT(minVersion) { + return fmt.Errorf("NVIDIA Device Plugin add-on can only be used Kubernetes 1.10 or above. Please specify \"orchestratorRelease\": \"1.10\"") + } } } } @@ -503,8 +523,8 @@ func (a *Properties) validateVNET() error { func (a *Properties) validateServicePrincipalProfile() error { if a.OrchestratorProfile.OrchestratorType == Kubernetes { - useManagedIdentity := (a.OrchestratorProfile.KubernetesConfig != nil && - a.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) + useManagedIdentity := a.OrchestratorProfile.KubernetesConfig != nil && + a.OrchestratorProfile.KubernetesConfig.UseManagedIdentity if !useManagedIdentity { if a.ServicePrincipalProfile == nil { @@ -772,16 +792,13 @@ func (a *AgentPoolProfile) validateOrchestratorSpecificProperties(orchestratorTy return fmt.Errorf("VirtualMachineScaleSets does not support storage account attached disks. 
Instead specify 'StorageAccount': '%s' or specify AvailabilityProfile '%s'", ManagedDisks, AvailabilitySet) } } - if len(a.Ports) == 0 && len(a.DNSPrefix) > 0 { - return fmt.Errorf("AgentPoolProfile.Ports must be non empty when AgentPoolProfile.DNSPrefix is specified") - } return nil } func validateKeyVaultSecrets(secrets []KeyVaultSecrets, requireCertificateStore bool) error { for _, s := range secrets { if len(s.VaultCertificates) == 0 { - return fmt.Errorf("Invalid KeyVaultSecrets must have no empty VaultCertificates") + return fmt.Errorf("Valid KeyVaultSecrets must have no empty VaultCertificates") } if s.SourceVault == nil { return fmt.Errorf("missing SourceVault in KeyVaultSecrets") @@ -978,7 +995,7 @@ func (k *KubernetesConfig) Validate(k8sVersion string, hasWindows bool) error { if e := k.validateNetworkPlugin(); e != nil { return e } - if e := k.validateNetworkPolicy(hasWindows); e != nil { + if e := k.validateNetworkPolicy(k8sVersion, hasWindows); e != nil { return e } if e := k.validateNetworkPluginPlusPolicy(); e != nil { @@ -1007,9 +1024,10 @@ func (k *KubernetesConfig) validateNetworkPlugin() error { return nil } -func (k *KubernetesConfig) validateNetworkPolicy(hasWindows bool) error { +func (k *KubernetesConfig) validateNetworkPolicy(k8sVersion string, hasWindows bool) error { networkPolicy := k.NetworkPolicy + networkPlugin := k.NetworkPlugin // Check NetworkPolicy has a valid value. valid := false @@ -1023,6 +1041,10 @@ func (k *KubernetesConfig) validateNetworkPolicy(hasWindows bool) error { return fmt.Errorf("unknown networkPolicy '%s' specified", networkPolicy) } + if networkPolicy == "azure" && networkPlugin == "azure" && !common.IsKubernetesVersionGe(k8sVersion, "1.8.0") { + return fmt.Errorf("networkPolicy azure requires kubernetes version of 1.8 or higher") + } + // Temporary safety check, to be removed when Windows support is added. 
if (networkPolicy == "calico" || networkPolicy == "cilium" || networkPolicy == "flannel") && hasWindows { return fmt.Errorf("networkPolicy '%s' is not supporting windows agents", networkPolicy) diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index 569de26395..242d4cd970 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -27,6 +27,9 @@ const ( ValidKubernetesCloudProviderRateLimitBucket = 10 ) +var falseVal = false +var trueVal = true + func Test_OrchestratorProfile_Validate(t *testing.T) { tests := map[string]struct { properties *Properties @@ -44,6 +47,104 @@ func Test_OrchestratorProfile_Validate(t *testing.T) { }, expectedError: "KubernetesConfig can be specified only when OrchestratorType is Kubernetes or OpenShift", }, + "should error when KubernetesConfig has invalid etcd version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + KubernetesConfig: &KubernetesConfig{ + EtcdVersion: "1.0.0", + }, + }, + }, + expectedError: "Invalid etcd version \"1.0.0\", please use one of the following versions: [2.2.5 2.3.0 2.3.1 2.3.2 2.3.3 2.3.4 2.3.5 2.3.6 2.3.7 2.3.8 3.0.0 3.0.1 3.0.2 3.0.3 3.0.4 3.0.5 3.0.6 3.0.7 3.0.8 3.0.9 3.0.10 3.0.11 3.0.12 3.0.13 3.0.14 3.0.15 3.0.16 3.0.17 3.1.0 3.1.1 3.1.2 3.1.2 3.1.3 3.1.4 3.1.5 3.1.6 3.1.7 3.1.8 3.1.9 3.1.10 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.2.5 3.2.6 3.2.7 3.2.8 3.2.9 3.2.11 3.2.12 3.2.13 3.2.14 3.2.15 3.2.16 3.2.23 3.3.0 3.3.1]", + }, + "should error when KubernetesConfig has enableAggregatedAPIs enabled with an invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.6.6", + KubernetesConfig: &KubernetesConfig{ + EnableAggregatedAPIs: true, + }, + }, + }, + expectedError: "enableAggregatedAPIs is only available in Kubernetes version 1.7.0 or greater; unable to validate for Kubernetes version 1.6.6", + }, + 
"should error when KubernetesConfig has enableAggregatedAPIs enabled and enableRBAC disabled": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnableAggregatedAPIs: true, + EnableRbac: &falseVal, + }, + }, + }, + expectedError: "enableAggregatedAPIs requires the enableRbac feature as a prerequisite", + }, + "should error when KubernetesConfig has enableDataEncryptionAtRest enabled with invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.6.6", + KubernetesConfig: &KubernetesConfig{ + EnableDataEncryptionAtRest: &trueVal, + }, + }, + }, + expectedError: "enableDataEncryptionAtRest is only available in Kubernetes version 1.7.0 or greater; unable to validate for Kubernetes version 1.6.6", + }, + "should error when KubernetesConfig has enableDataEncryptionAtRest enabled with invalid encryption key": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnableDataEncryptionAtRest: &trueVal, + EtcdEncryptionKey: "fakeEncryptionKey", + }, + }, + }, + expectedError: "etcdEncryptionKey must be base64 encoded. 
Please provide a valid base64 encoded value or leave the etcdEncryptionKey empty to auto-generate the value", + }, + "should error when KubernetesConfig has enableEncryptionWithExternalKms enabled with invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.6.6", + KubernetesConfig: &KubernetesConfig{ + EnableEncryptionWithExternalKms: &trueVal, + }, + }, + }, + expectedError: "enableEncryptionWithExternalKms is only available in Kubernetes version 1.10.0 or greater; unable to validate for Kubernetes version 1.6.6", + }, + "should error when KubernetesConfig has enablePodSecurity enabled with invalid settings": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnablePodSecurityPolicy: &trueVal, + }, + }, + }, + expectedError: "enablePodSecurityPolicy requires the enableRbac feature as a prerequisite", + }, + "should error when KubernetesConfig has enablePodSecurity enabled with invalid version": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "Kubernetes", + OrchestratorVersion: "1.7.0", + KubernetesConfig: &KubernetesConfig{ + EnableRbac: &trueVal, + EnablePodSecurityPolicy: &trueVal, + }, + }, + }, + expectedError: "enablePodSecurityPolicy is only supported in acs-engine for Kubernetes version 1.8.0 or greater; unable to validate for Kubernetes version 1.7.0", + }, "should not error with empty object": { properties: &Properties{ OrchestratorProfile: &OrchestratorProfile{ @@ -52,6 +153,28 @@ func Test_OrchestratorProfile_Validate(t *testing.T) { }, }, }, + "should error when DcosConfig orchestrator has invalid configuration": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "DCOS", + OrchestratorVersion: "1.12.0", + }, + }, + expectedError: "the 
following OrchestratorProfile configuration is not supported: OrchestratorType: DCOS, OrchestratorRelease: , OrchestratorVersion: 1.12.0. Please check supported Release or Version for this build of acs-engine", + }, + "should error when DcosConfig orchestrator configuration has invalid static IP": { + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: "DCOS", + DcosConfig: &DcosConfig{ + BootstrapProfile: &BootstrapProfile{ + StaticIP: "0.0.0.0.0.0", + }, + }, + }, + }, + expectedError: "DcosConfig.BootstrapProfile.StaticIP '0.0.0.0.0.0' is an invalid IP address", + }, "should error when DcosConfig populated for non-Kubernetes OrchestratorType 1": { properties: &Properties{ OrchestratorProfile: &OrchestratorProfile{ @@ -397,6 +520,15 @@ func Test_KubernetesConfig_Validate(t *testing.T) { if err := c.Validate(k8sVersion, false); err != nil { t.Error("should not error when DNSServiceIP and ServiceCidr are valid") } + + c = KubernetesConfig{ + ClusterSubnet: "192.168.0.1/24", + NetworkPlugin: "azure", + } + + if err := c.Validate(k8sVersion, false); err == nil { + t.Error("should error when ClusterSubnet has a mask of 24 bits or higher") + } } // Tests that apply to 1.6 and later releases @@ -427,40 +559,51 @@ func Test_Properties_ValidateNetworkPolicy(t *testing.T) { p.OrchestratorProfile = &OrchestratorProfile{} p.OrchestratorProfile.OrchestratorType = Kubernetes + k8sVersion := "1.8.0" for _, policy := range NetworkPolicyValues { p.OrchestratorProfile.KubernetesConfig = &KubernetesConfig{} p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = policy - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(false); err != nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, false); err != nil { t.Errorf( - "should not error on networkPolicy=\"%s\"", + "should not error on networkPolicy=\"%s\" on k8sVersion=\"%s\"", policy, + k8sVersion, ) } } 
p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "not-existing" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(false); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, false); err == nil { t.Errorf( "should error on invalid networkPolicy", ) } + k8sVersion = "1.7.9" + p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "azure" + p.OrchestratorProfile.KubernetesConfig.NetworkPlugin = "azure" + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, false); err == nil { + t.Errorf( + "should error on azure networkPolicy + azure networkPlugin with k8s version < 1.8.0", + ) + } + p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "calico" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(true); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, true); err == nil { t.Errorf( "should error on calico for windows clusters", ) } p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "cilium" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(true); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, true); err == nil { t.Errorf( "should error on cilium for windows clusters", ) } p.OrchestratorProfile.KubernetesConfig.NetworkPolicy = "flannel" - if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(true); err == nil { + if err := p.OrchestratorProfile.KubernetesConfig.validateNetworkPolicy(k8sVersion, true); err == nil { t.Errorf( "should error on flannel for windows clusters", ) @@ -521,10 +664,6 @@ func Test_Properties_ValidateNetworkPluginPlusPolicy(t *testing.T) { networkPlugin: "azure", networkPolicy: "flannel", }, - { - networkPlugin: "azure", - networkPolicy: "azure", - }, { networkPlugin: "kubenet", networkPolicy: "none", @@ -550,6 +689,103 @@ func Test_Properties_ValidateNetworkPluginPlusPolicy(t 
*testing.T) { } } +func TestProperties_ValidateLinuxProfile(t *testing.T) { + p := getK8sDefaultProperties(true) + p.LinuxProfile.SSH = struct { + PublicKeys []PublicKey `json:"publicKeys" validate:"required,len=1"` + }{ + PublicKeys: []PublicKey{{}}, + } + expectedMsg := "KeyData in LinuxProfile.SSH.PublicKeys cannot be empty string" + err := p.Validate(true) + + if err.Error() != expectedMsg { + t.Errorf("expected error message : %s to be thrown, but got : %s", expectedMsg, err.Error()) + } +} + +func TestProperties_ValidateInvalidExtensions(t *testing.T) { + + p := getK8sDefaultProperties(true) + p.OrchestratorProfile.OrchestratorVersion = "1.10.0" + + p.AgentPoolProfiles = []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: VirtualMachineScaleSets, + Extensions: []Extension{ + { + Name: "extensionName", + SingleOrAll: "single", + Template: "fakeTemplate", + }, + }, + }, + } + err := p.Validate(true) + expectedMsg := "Extensions are currently not supported with VirtualMachineScaleSets. 
Please specify \"availabilityProfile\": \"AvailabilitySet\"" + + if err.Error() != expectedMsg { + t.Errorf("expected error message : %s to be thrown, but got %s", expectedMsg, err.Error()) + } + +} + +func TestProperties_ValidateInvalidExtensionProfiles(t *testing.T) { + tests := []struct { + extensionProfiles []*ExtensionProfile + expectedErr error + }{ + { + extensionProfiles: []*ExtensionProfile{ + { + Name: "FakeExtensionProfile", + ExtensionParametersKeyVaultRef: &KeyvaultSecretRef{ + VaultID: "", + SecretName: "fakeSecret", + }, + }, + }, + expectedErr: errors.New("the Keyvault ID must be specified for Extension FakeExtensionProfile"), + }, + { + extensionProfiles: []*ExtensionProfile{ + { + Name: "FakeExtensionProfile", + ExtensionParametersKeyVaultRef: &KeyvaultSecretRef{ + VaultID: "fakeVaultID", + SecretName: "", + }, + }, + }, + expectedErr: errors.New("the Keyvault Secret must be specified for Extension FakeExtensionProfile"), + }, + { + extensionProfiles: []*ExtensionProfile{ + { + Name: "FakeExtensionProfile", + ExtensionParametersKeyVaultRef: &KeyvaultSecretRef{ + VaultID: "fakeVaultID", + SecretName: "fakeSecret", + }, + }, + }, + expectedErr: errors.New("Extension FakeExtensionProfile's keyvault secret reference is of incorrect format"), + }, + } + + for _, test := range tests { + p := getK8sDefaultProperties(true) + p.ExtensionProfiles = test.extensionProfiles + err := p.Validate(true) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Errorf("expected error with message : %s, but got %s", test.expectedErr.Error(), err.Error()) + } + } +} + func Test_ServicePrincipalProfile_ValidateSecretOrKeyvaultSecretRef(t *testing.T) { t.Run("ServicePrincipalProfile with secret should pass", func(t *testing.T) { @@ -650,12 +886,7 @@ func TestValidateKubernetesLabelKey(t *testing.T) { } func Test_AadProfile_Validate(t *testing.T) { - properties := &Properties{ - AADProfile: &AADProfile{}, - OrchestratorProfile: &OrchestratorProfile{ - OrchestratorType: 
Kubernetes, - }, - } + properties := getK8sDefaultProperties(false) t.Run("Valid aadProfile should pass", func(t *testing.T) { for _, aadProfile := range []*AADProfile{ { @@ -689,14 +920,79 @@ func Test_AadProfile_Validate(t *testing.T) { ServerAppID: "403f018b-4d89-495b-b548-0cf9868cdb0a", TenantID: "1", }, + { + ClientAppID: "92444486-5bc3-4291-818b-d53ae480991b", + ServerAppID: "403f018b-4d89-495b-b548-0cf9868cdb0a", + TenantID: "feb784f6-7174-46da-aeae-da66e80c7a11", + AdminGroupID: "1", + }, {}, } { properties.AADProfile = aadProfile - if err := properties.validateAADProfile(); err == nil { + if err := properties.Validate(true); err == nil { t.Errorf("error should have occurred") } } }) + + t.Run("aadProfiles should not be supported non-Kubernetes orchestrators", func(t *testing.T) { + properties.OrchestratorProfile = &OrchestratorProfile{ + OrchestratorType: OpenShift, + } + properties.AADProfile = &AADProfile{ + ClientAppID: "92444486-5bc3-4291-818b-d53ae480991b", + ServerAppID: "403f018b-4d89-495b-b548-0cf9868cdb0a", + } + expectedMsg := "'aadProfile' is only supported by orchestrator 'Kubernetes'" + if err := properties.validateAADProfile(); err == nil || err.Error() != expectedMsg { + t.Errorf("error should have occurred with msg : %s, but got : %s", expectedMsg, err.Error()) + } + }) +} + +func TestValidateProperties_AzProfile(t *testing.T) { + p := getK8sDefaultProperties(false) + + t.Run("It returns error for unsupported orchestratorTypes", func(t *testing.T) { + p.OrchestratorProfile = &OrchestratorProfile{ + OrchestratorType: Kubernetes, + } + p.AzProfile = &AzProfile{ + TenantID: "tenant_id", + SubscriptionID: "sub_id", + ResourceGroup: "rg1", + } + expectedMsg := "'azProfile' is only supported by orchestrator 'OpenShift'" + if err := p.Validate(false); err == nil || err.Error() != expectedMsg { + t.Errorf("expected error to be thrown with message : %s", expectedMsg) + } + }) + + t.Run("It should return an error for incomplete azProfile details", 
func(t *testing.T) { + p.OrchestratorProfile = &OrchestratorProfile{ + OrchestratorType: OpenShift, + OpenShiftConfig: validOpenShiftConifg(), + } + p.AzProfile = &AzProfile{ + TenantID: "tenant_id", + SubscriptionID: "sub_id", + ResourceGroup: "", + } + expectedMsg := "'azProfile' must be supplied in full for orchestrator 'OpenShift'" + if err := p.validateAzProfile(); err == nil || err.Error() != expectedMsg { + t.Errorf("expected error to be thrown with message : %s", err.Error()) + } + }) + +} + +func TestProperties_ValidateInvalidStruct(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile = &OrchestratorProfile{} + expectedMsg := "missing Properties.OrchestratorProfile.OrchestratorType" + if err := p.Validate(false); err == nil || err.Error() != expectedMsg { + t.Errorf("expected validation error with message : %s", err.Error()) + } } func getK8sDefaultProperties(hasWindows bool) *Properties { @@ -952,8 +1248,18 @@ func TestValidateImageNameAndGroup(t *testing.T) { }, } + p := getK8sDefaultProperties(true) for _, test := range tests { - gotErr := test.image.validateImageNameAndGroup() + p.AgentPoolProfiles = []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + ImageRef: &test.image, + }, + } + gotErr := p.validateAgentPoolProfiles() if !reflect.DeepEqual(gotErr, test.expectedErr) { t.Logf("scenario %q", test.name) t.Errorf("expected error: %v, got: %v", test.expectedErr, gotErr) @@ -1017,6 +1323,18 @@ func TestMasterProfileValidate(t *testing.T) { Count: 1, }, }, + { + orchestratorType: Kubernetes, + masterProfile: MasterProfile{ + DNSPrefix: "dummy", + Count: 3, + ImageRef: &ImageReference{ + Name: "", + ResourceGroup: "rg", + }, + }, + expectedErr: "imageName needs to be specified when imageResourceGroup is provided", + }, } for i, test := range tests { @@ -1034,6 +1352,155 @@ func TestMasterProfileValidate(t *testing.T) { } } +func 
TestProperties_ValidateAddon(t *testing.T) { + p := getK8sDefaultProperties(true) + p.AgentPoolProfiles = []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_NC6", + Count: 1, + AvailabilityProfile: AvailabilitySet, + }, + } + p.OrchestratorProfile.OrchestratorVersion = "1.9.0" + p.OrchestratorProfile.KubernetesConfig = &KubernetesConfig{ + Addons: []KubernetesAddon{ + { + Name: "nvidia-device-plugin", + Enabled: &trueVal, + }, + }, + } + + err := p.Validate(true) + expectedMsg := "NVIDIA Device Plugin add-on can only be used Kubernetes 1.10 or above. Please specify \"orchestratorRelease\": \"1.10\"" + if err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got : %s", expectedMsg, err.Error()) + } +} + +func TestProperties_ValidateVNET(t *testing.T) { + validVNetSubnetID := "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME" + validVNetSubnetID2 := "/subscriptions/SUB_ID2/resourceGroups/RG_NAME2/providers/Microsoft.Network/virtualNetworks/VNET_NAME2/subnets/SUBNET_NAME" + + p := getK8sDefaultProperties(true) + tests := []struct { + masterProfile *MasterProfile + agentPoolProfiles []*AgentPoolProfile + expectedMsg string + }{ + { + masterProfile: &MasterProfile{ + VnetSubnetID: "testvnetstring", + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: "", + }, + }, + expectedMsg: "Multiple VNET Subnet configurations specified. 
The master profile and each agent pool profile must all specify a custom VNET Subnet, or none at all", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: "testvnetstring", + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: "testvnetstring", + }, + }, + expectedMsg: "Unable to parse vnetSubnetID. Please use a vnetSubnetID with format /subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: validVNetSubnetID, + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID, + }, + { + Name: "agentpool2", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID2, + }, + }, + expectedMsg: "Multiple VNETS specified. 
The master profile and each agent pool must reference the same VNET (but it is ok to reference different subnets on that VNET)", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: validVNetSubnetID, + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + FirstConsecutiveStaticIP: "10.0.0.invalid", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID, + }, + }, + expectedMsg: "MasterProfile.FirstConsecutiveStaticIP (with VNET Subnet specification) '10.0.0.invalid' is an invalid IP address", + }, + { + masterProfile: &MasterProfile{ + VnetSubnetID: validVNetSubnetID, + Count: 1, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + FirstConsecutiveStaticIP: "10.0.0.1", + VnetCidr: "10.1.0.0/invalid", + }, + agentPoolProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_D2_v2", + Count: 1, + AvailabilityProfile: AvailabilitySet, + VnetSubnetID: validVNetSubnetID, + }, + }, + expectedMsg: "MasterProfile.VnetCidr '10.1.0.0/invalid' contains invalid cidr notation", + }, + } + + for _, test := range tests { + p.MasterProfile = test.masterProfile + p.AgentPoolProfiles = test.agentPoolProfiles + err := p.Validate(true) + if err.Error() != test.expectedMsg { + t.Errorf("expected error message : %s, but got %s", test.expectedMsg, err.Error()) + } + } +} + func TestOpenshiftValidate(t *testing.T) { tests := []struct { name string @@ -1074,6 +1541,14 @@ func TestOpenshiftValidate(t *testing.T) { StorageProfile: ManagedDisks, AvailabilityProfile: AvailabilitySet, }, + { + Name: "infra", + Role: "infra", + Count: 1, + VMSize: "Standard_D4s_v3", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, }, LinuxProfile: &LinuxProfile{ AdminUsername: "admin", @@ -1186,6 +1661,32 @@ func TestOpenshiftValidate(t *testing.T) { } } +func TestWindowsProfile_Validate(t *testing.T) { + w := &WindowsProfile{} 
+ w.WindowsImageSourceURL = "http://fakeWindowsImageSourceURL" + err := w.Validate("Mesos") + expectedMsg := "Windows Custom Images are only supported if the Orchestrator Type is DCOS or Kubernetes" + if err.Error() != expectedMsg { + t.Errorf("should error on unsupported orchType with msg : %s, but got : %s", expectedMsg, err.Error()) + } + + w.AdminUsername = "" + w.AdminPassword = "password" + err = w.Validate(Kubernetes) + expectedMsg = "WindowsProfile.AdminUsername is required, when agent pool specifies windows" + if err.Error() != expectedMsg { + t.Errorf("should error on unsupported orchType with msg : %s, but got : %s", expectedMsg, err.Error()) + } + + w.AdminUsername = "azureuser" + w.AdminPassword = "" + err = w.Validate(Kubernetes) + expectedMsg = "WindowsProfile.AdminPassword is required, when agent pool specifies windows" + if err.Error() != expectedMsg { + t.Errorf("should error on unsupported orchType with msg : %s, but got : %s", expectedMsg, err.Error()) + } +} + // validOpenShiftConifg returns a valid OpenShift config that can be use for validation tests. 
func validOpenShiftConifg() *OpenShiftConfig { return &OpenShiftConfig{ @@ -1193,3 +1694,324 @@ func validOpenShiftConifg() *OpenShiftConfig { ClusterPassword: "bar", } } + +func TestValidateAgentPoolProfiles(t *testing.T) { + tests := []struct { + name string + properties *Properties + expectedErr error + }{ + { + name: "valid", + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: OpenShift, + }, + AgentPoolProfiles: []*AgentPoolProfile{ + { + Name: "compute", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + { + Name: "infra", + Role: "infra", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + }, + }, + expectedErr: nil, + }, + { + name: "invalid - role wrong", + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: OpenShift, + }, + AgentPoolProfiles: []*AgentPoolProfile{ + { + Name: "compute", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + { + Name: "infra", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + }, + }, + expectedErr: errors.New("OpenShift requires that the 'infra' agent pool profile, and no other, should have role 'infra'"), + }, + { + name: "invalid - profiles misnamed", + properties: &Properties{ + OrchestratorProfile: &OrchestratorProfile{ + OrchestratorType: OpenShift, + }, + AgentPoolProfiles: []*AgentPoolProfile{ + { + Name: "bad", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + { + Name: "infra", + Role: "infra", + StorageProfile: ManagedDisks, + AvailabilityProfile: AvailabilitySet, + }, + }, + }, + expectedErr: errors.New("OpenShift requires exactly two agent pool profiles: compute and infra"), + }, + } + + for _, test := range tests { + gotErr := test.properties.validateAgentPoolProfiles() + if !reflect.DeepEqual(test.expectedErr, gotErr) { + t.Logf("running scenario %q", test.name) + t.Errorf("expected error: %v\ngot 
error: %v", test.expectedErr, gotErr) + } + } +} + +func TestValidate_VaultKeySecrets(t *testing.T) { + + tests := []struct { + secrets []KeyVaultSecrets + expectedErr error + }{ + { + secrets: []KeyVaultSecrets{ + { + SourceVault: &KeyVaultID{ + ID: "0a0b0c0d0e0f", + }, + VaultCertificates: []KeyVaultCertificate{}, + }, + }, + expectedErr: errors.New("Valid KeyVaultSecrets must have no empty VaultCertificates"), + }, + { + secrets: []KeyVaultSecrets{ + { + SourceVault: &KeyVaultID{}, + VaultCertificates: []KeyVaultCertificate{ + { + CertificateURL: "dummyURL", + CertificateStore: "dummyCertStore", + }, + }, + }, + }, + expectedErr: errors.New("KeyVaultSecrets must have a SourceVault.ID"), + }, + { + secrets: []KeyVaultSecrets{ + { + VaultCertificates: []KeyVaultCertificate{ + { + CertificateURL: "dummyURL", + CertificateStore: "dummyCertStore", + }, + }, + }, + }, + expectedErr: errors.New("missing SourceVault in KeyVaultSecrets"), + }, + { + secrets: []KeyVaultSecrets{ + { + SourceVault: &KeyVaultID{ + ID: "0a0b0c0d0e0f", + }, + VaultCertificates: []KeyVaultCertificate{ + { + CertificateURL: "dummyUrl", + CertificateStore: "", + }, + }, + }, + }, + expectedErr: errors.New("KeyVaultCertificate.CertificateStore must be a non-empty value for certificates in a WindowsProfile"), + }, + } + + for _, test := range tests { + err := validateKeyVaultSecrets(test.secrets, true) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Errorf("expected error to be thrown with msg : %s", test.expectedErr.Error()) + } + } +} + +func TestValidateProperties_OrchestratorSpecificProperties(t *testing.T) { + t.Run("Should not support DNS prefix for Kubernetes orchestrators", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DNSPrefix = "sampleprefix" + expectedMsg := "AgentPoolProfile.DNSPrefix must be empty for Kubernetes" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + 
t.Errorf("expected error with message : %s", expectedMsg) + } + }) + + t.Run("Should not contain agentPool ports for Kubernetes orchestrators", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{80, 443, 8080} + expectedMsg := "AgentPoolProfile.Ports must be empty for Kubernetes" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not support ScaleSetEviction policies with regular priority", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{} + agentPoolProfiles[0].ScaleSetPriority = "Regular" + agentPoolProfiles[0].ScaleSetEvictionPolicy = "Deallocate" + expectedMsg := "property 'AgentPoolProfile.ScaleSetEvictionPolicy' must be empty for AgentPoolProfile.Priority of Regular" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain a valid DNS prefix", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DNSPrefix = "invalid_prefix" + expectedMsg := "DNSPrefix 'invalid_prefix' is invalid. The DNSPrefix must contain between 3 and 45 characters and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. 
(length was 14)" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not contain ports when DNS prefix is empty", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{80, 443} + expectedMsg := "AgentPoolProfile.Ports must be empty when AgentPoolProfile.DNSPrefix is empty for Orchestrator: OpenShift" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain unique ports", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].Ports = []int{80, 443, 80} + agentPoolProfiles[0].DNSPrefix = "sampleprefix" + expectedMsg := "agent profile 'agentpool' has duplicate port '80', ports must be unique" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain valid Storage Profile", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DiskSizesGB = []int{512, 256, 768} + agentPoolProfiles[0].DNSPrefix = "sampleprefix" + expectedMsg := "property 'StorageProfile' must be set to either 'StorageAccount' or 'ManagedDisks' when attaching disks" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should contain valid Availability Profile", func(t *testing.T) { + p := 
getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DiskSizesGB = []int{512, 256, 768} + agentPoolProfiles[0].StorageProfile = "ManagedDisks" + agentPoolProfiles[0].AvailabilityProfile = "InvalidAvailabilityProfile" + expectedMsg := "property 'AvailabilityProfile' must be set to either 'VirtualMachineScaleSets' or 'AvailabilitySet' when attaching disks" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not support both VirtualMachineScaleSets and StorageAccount", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].DiskSizesGB = []int{512, 256, 768} + agentPoolProfiles[0].StorageProfile = "StorageAccount" + agentPoolProfiles[0].AvailabilityProfile = "VirtualMachineScaleSets" + expectedMsg := "VirtualMachineScaleSets does not support storage account attached disks. Instead specify 'StorageAccount': 'ManagedDisks' or specify AvailabilityProfile 'AvailabilitySet'" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) +} + +func TestValidateProperties_CustomNodeLabels(t *testing.T) { + + t.Run("Should throw error for invalid Kubernetes Label Keys", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].CustomNodeLabels = map[string]string{ + "a/b/c": "a", + } + expectedMsg := "Label key 'a/b/c' is invalid. Valid label keys have two segments: an optional prefix and name, separated by a slash (/). 
The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (.), not longer than 253 characters in total, followed by a slash (/)" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should throw error for invalid Kubernetes Label Values", func(t *testing.T) { + p := getK8sDefaultProperties(false) + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].CustomNodeLabels = map[string]string{ + "fookey": "b$$a$$r", + } + expectedMsg := "Label value 'b$$a$$r' is invalid. Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should not support orchestratorTypes other than Kubernetes/DCOS", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = SwarmMode + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].CustomNodeLabels = map[string]string{ + "foo": "bar", + } + expectedMsg := "Agent CustomNodeLabels are only supported for DCOS and Kubernetes" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) +} + +func TestAgentPoolProfile_ValidateAvailabilityProfile(t *testing.T) { + t.Run("Should fail for invalid availability profile", func(t *testing.T) { + p := getK8sDefaultProperties(false) + 
agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].AvailabilityProfile = "InvalidAvailabilityProfile" + expectedMsg := "unknown availability profile type 'InvalidAvailabilityProfile' for agent pool 'agentpool'. Specify either AvailabilitySet, or VirtualMachineScaleSets" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) + + t.Run("Should fail when using VirtualMachineScalesets with Openshift", func(t *testing.T) { + p := getK8sDefaultProperties(false) + p.OrchestratorProfile.OrchestratorType = OpenShift + agentPoolProfiles := p.AgentPoolProfiles + agentPoolProfiles[0].AvailabilityProfile = VirtualMachineScaleSets + expectedMsg := "Only AvailabilityProfile: AvailabilitySet is supported for Orchestrator 'OpenShift'" + if err := p.validateAgentPoolProfiles(); err.Error() != expectedMsg { + t.Errorf("expected error with message : %s, but got %s", expectedMsg, err.Error()) + } + }) +} diff --git a/pkg/armhelpers/azureclient.go b/pkg/armhelpers/azureclient.go index 0a48e08c89..275ea1b222 100644 --- a/pkg/armhelpers/azureclient.go +++ b/pkg/armhelpers/azureclient.go @@ -23,6 +23,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/to" "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/Azure/acs-engine/pkg/acsengine" @@ -75,7 +76,7 @@ func NewAzureClientWithDeviceAuth(env azure.Environment, subscriptionID string) home, err := homedir.Dir() if err != nil { - return nil, fmt.Errorf("Failed to get user home directory to look for cached token: %q", err) + return nil, errors.Wrap(err, "Failed to get user home directory to look for cached token") } cachePath := filepath.Join(home, ApplicationDir, "cache", fmt.Sprintf("%s_%s.token.json", tenantID, acsEngineClientID)) @@ -157,22 +158,22 @@ func 
NewAzureClientWithClientSecret(env azure.Environment, subscriptionID, clien func NewAzureClientWithClientCertificateFile(env azure.Environment, subscriptionID, clientID, certificatePath, privateKeyPath string) (*AzureClient, error) { certificateData, err := ioutil.ReadFile(certificatePath) if err != nil { - return nil, fmt.Errorf("Failed to read certificate: %q", err) + return nil, errors.Wrap(err, "Failed to read certificate") } block, _ := pem.Decode(certificateData) if block == nil { - return nil, fmt.Errorf("Failed to decode pem block from certificate") + return nil, errors.New("Failed to decode pem block from certificate") } certificate, err := x509.ParseCertificate(block.Bytes) if err != nil { - return nil, fmt.Errorf("Failed to parse certificate: %q", err) + return nil, errors.Wrap(err, "Failed to parse certificate") } privateKey, err := parseRsaPrivateKey(privateKeyPath) if err != nil { - return nil, fmt.Errorf("Failed to parse rsa private key: %q", err) + return nil, errors.Wrap(err, "Failed to parse rsa private key") } return NewAzureClientWithClientCertificate(env, subscriptionID, clientID, certificate, privateKey) @@ -186,11 +187,11 @@ func NewAzureClientWithClientCertificate(env azure.Environment, subscriptionID, } if certificate == nil { - return nil, fmt.Errorf("certificate should not be nil") + return nil, errors.New("certificate should not be nil") } if privateKey == nil { - return nil, fmt.Errorf("privateKey should not be nil") + return nil, errors.New("privateKey should not be nil") } armSpt, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, clientID, certificate, privateKey, env.ServiceManagementEndpoint) @@ -231,7 +232,7 @@ func tryLoadCachedToken(cachePath string) (*adal.Token, error) { token, err := adal.LoadToken(cachePath) if err != nil { - return nil, fmt.Errorf("Failed to load token from file: %v", err) + return nil, errors.Wrap(err, "Failed to load token from file") } return token, nil @@ -313,7 +314,7 @@ func (az 
*AzureClient) EnsureProvidersRegistered(subscriptionID string) error { return err } if registeredProviders.Value == nil { - return fmt.Errorf("Providers list was nil. subscription=%q", subscriptionID) + return errors.Errorf("Providers list was nil. subscription=%q", subscriptionID) } m := make(map[string]bool) @@ -324,7 +325,7 @@ func (az *AzureClient) EnsureProvidersRegistered(subscriptionID string) error { for _, provider := range RequiredResourceProviders { registered, ok := m[strings.ToLower(provider)] if !ok { - return fmt.Errorf("Unknown resource provider %q", provider) + return errors.Errorf("Unknown resource provider %q", provider) } if registered { log.Debugf("Already registered for %q", provider) @@ -346,7 +347,7 @@ func parseRsaPrivateKey(path string) (*rsa.PrivateKey, error) { block, _ := pem.Decode(privateKeyData) if block == nil { - return nil, fmt.Errorf("Failed to decode a pem block from private key") + return nil, errors.New("Failed to decode a pem block from private key") } privatePkcs1Key, errPkcs1 := x509.ParsePKCS1PrivateKey(block.Bytes) @@ -358,12 +359,12 @@ func parseRsaPrivateKey(path string) (*rsa.PrivateKey, error) { if errPkcs8 == nil { privatePkcs8RsaKey, ok := privatePkcs8Key.(*rsa.PrivateKey) if !ok { - return nil, fmt.Errorf("pkcs8 contained non-RSA key. Expected RSA key") + return nil, errors.New("pkcs8 contained non-RSA key. Expected RSA key") } return privatePkcs8RsaKey, nil } - return nil, fmt.Errorf("failed to parse private key as Pkcs#1 or Pkcs#8. (%s). (%s)", errPkcs1, errPkcs8) + return nil, errors.Errorf("failed to parse private key as Pkcs#1 or Pkcs#8. (%s). (%s)", errPkcs1, errPkcs8) } //AddAcceptLanguages sets the list of languages to accept on this request diff --git a/pkg/armhelpers/deploymentError_test.go b/pkg/armhelpers/deploymentError_test.go index 0d288d5e18..16e1e1148a 100644 --- a/pkg/armhelpers/deploymentError_test.go +++ b/pkg/armhelpers/deploymentError_test.go @@ -6,6 +6,9 @@ import ( . 
"github.com/Azure/acs-engine/pkg/test" . "github.com/onsi/gomega" + "fmt" + + "github.com/Azure/azure-sdk-for-go/arm/resources/resources" . "github.com/onsi/ginkgo" log "github.com/sirupsen/logrus" ) @@ -63,4 +66,67 @@ var _ = Describe("Template deployment tests", func() { Expect(string(deplErr.Response)).To(ContainSubstring("\"code\":\"Conflict\"")) Expect(len(deplErr.OperationsLists)).To(Equal(0)) }) + + It("Should return deployment error with Operations Lists", func() { + mockClient := &MockACSEngineClient{} + mockClient.FailDeployTemplateWithProperties = true + logger := log.NewEntry(log.New()) + + err := DeployTemplateSync(mockClient, logger, "rg1", "agentvm", map[string]interface{}{}, map[string]interface{}{}) + Expect(err).NotTo(BeNil()) + deplErr, ok := err.(*DeploymentError) + Expect(ok).To(BeTrue()) + Expect(deplErr.TopError).NotTo(BeNil()) + Expect(deplErr.ProvisioningState).To(Equal("Failed")) + Expect(deplErr.StatusCode).To(Equal(200)) + Expect(string(deplErr.Response)).To(ContainSubstring("\"code\":\"Conflict\"")) + Expect(len(deplErr.OperationsLists)).To(Equal(2)) + }) + + It("Should return nil on success", func() { + mockClient := &MockACSEngineClient{} + logger := log.NewEntry(log.New()) + err := DeployTemplateSync(mockClient, logger, "rg1", "agentvm", map[string]interface{}{}, map[string]interface{}{}) + Expect(err).To(BeNil()) + }) }) + +func TestDeploymentError_Error(t *testing.T) { + operationsLists := make([]resources.DeploymentOperationsListResult, 0) + operationsList := resources.DeploymentOperationsListResult{} + operations := make([]resources.DeploymentOperation, 0) + id := "1234" + oID := "342" + provisioningState := "Failed" + status := map[string]interface{}{ + "message": "sample status message", + } + properties := resources.DeploymentOperationProperties{ + ProvisioningState: &provisioningState, + StatusMessage: &status, + } + operation1 := resources.DeploymentOperation{ + ID: &id, + OperationID: 
&oID, + Properties: &properties, + } + operations = append(operations, operation1) + operationsList.Value = &operations + operationsLists = append(operationsLists, operationsList) + deploymentErr := &DeploymentError{ + DeploymentName: "agentvm", + ResourceGroup: "rg1", + TopError: fmt.Errorf("sample error"), + ProvisioningState: "Failed", + Response: []byte("sample resp"), + StatusCode: 500, + OperationsLists: operationsLists, + } + errString := deploymentErr.Error() + expected := `DeploymentName[agentvm] ResourceGroup[rg1] TopError[sample error] StatusCode[500] Response[sample resp] ProvisioningState[Failed] Operations[{ + "message": "sample status message" +}]` + if errString != expected { + t.Errorf("expected error with message %s, but got %s", expected, errString) + } +} diff --git a/pkg/armhelpers/mockclients.go b/pkg/armhelpers/mockclients.go index b4820a9f23..1438b03cf3 100644 --- a/pkg/armhelpers/mockclients.go +++ b/pkg/armhelpers/mockclients.go @@ -23,6 +23,7 @@ type MockACSEngineClient struct { FailDeployTemplate bool FailDeployTemplateQuota bool FailDeployTemplateConflict bool + FailDeployTemplateWithProperties bool FailEnsureResourceGroup bool FailListVirtualMachines bool FailListVirtualMachineScaleSets bool @@ -187,6 +188,29 @@ func (mc *MockACSEngineClient) DeployTemplate(resourceGroup, name string, templa }}}, errors.New(errmsg) + case mc.FailDeployTemplateWithProperties: + errmsg := `resources.DeploymentsClient#CreateOrUpdate: Failure sending request: StatusCode=200 -- Original Error: Long running operation terminated with status 'Failed': Code="DeploymentFailed" Message="At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/arm-debug for usage details.` + resp := `{ +"status":"Failed", +"error":{ + "code":"DeploymentFailed", + "message":"At least one resource deployment operation failed. Please list deployment operations for details. 
Please see https://aka.ms/arm-debug for usage details.", + "details":[{ + "code":"Conflict", + "message":"{\r\n \"error\": {\r\n \"code\": \"PropertyChangeNotAllowed\",\r\n \"target\": \"dataDisk.createOption\",\r\n \"message\": \"Changing property 'dataDisk.createOption' is not allowed.\"\r\n }\r\n}" +}]}}` + provisioningState := "Failed" + return &resources.DeploymentExtended{ + Response: autorest.Response{ + Response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(resp))), + }}, + Properties: &resources.DeploymentPropertiesExtended{ + ProvisioningState: &provisioningState, + }}, + errors.New(errmsg) default: return nil, nil } @@ -541,7 +565,40 @@ func (mc *MockACSEngineClient) ListProviders() (resources.ProviderListResult, er // ListDeploymentOperations gets all deployments operations for a deployment. func (mc *MockACSEngineClient) ListDeploymentOperations(resourceGroupName string, deploymentName string, top *int32) (result resources.DeploymentOperationsListResult, err error) { - return resources.DeploymentOperationsListResult{}, nil + resp := `{ + "properties": { + "provisioningState":"Failed", + "correlationId":"d5062e45-6e9f-4fd3-a0a0-6b2c56b15757", + "error":{ + "code":"DeploymentFailed","message":"At least one resource deployment operation failed. Please list deployment operations for details. 
Please see http://aka.ms/arm-debug for usage details.", + "details":[{"code":"Conflict","message":"{\r\n \"error\": {\r\n \"message\": \"Conflict\",\r\n \"code\": \"Conflict\"\r\n }\r\n}"}] + } + } +}` + + provisioningState := "Failed" + id := "00000000" + operationID := "d5062e45-6e9f-4fd3-a0a0-6b2c56b15757" + nextLink := fmt.Sprintf("https://management.azure.com/subscriptions/11111/resourcegroups/%s/deployments/%s/operations?$top=%s&api-version=2018-02-01", resourceGroupName, deploymentName, "5") + return resources.DeploymentOperationsListResult{ + Response: autorest.Response{ + Response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(resp))), + }, + }, + Value: &[]resources.DeploymentOperation{ + { + ID: &id, + OperationID: &operationID, + Properties: &resources.DeploymentOperationProperties{ + ProvisioningState: &provisioningState, + }, + }, + }, + NextLink: &nextLink, + }, nil } // ListDeploymentOperationsNextResults retrieves the next set of results, if any. 
diff --git a/pkg/armhelpers/utils/util.go b/pkg/armhelpers/utils/util.go index 05f9cb69fe..3271307a7f 100644 --- a/pkg/armhelpers/utils/util.go +++ b/pkg/armhelpers/utils/util.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -53,7 +54,7 @@ func ResourceName(ID string) (string, error) { parts := strings.Split(ID, "/") name := parts[len(parts)-1] if len(name) == 0 { - return "", fmt.Errorf("resource name was missing from identifier") + return "", errors.Errorf("resource name was missing from identifier") } return name, nil @@ -79,13 +80,13 @@ func SplitBlobURI(URI string) (string, string, string, error) { func K8sLinuxVMNameParts(vmName string) (poolIdentifier, nameSuffix string, agentIndex int, err error) { vmNameParts := vmnameLinuxRegexp.FindStringSubmatch(vmName) if len(vmNameParts) != 4 { - return "", "", -1, fmt.Errorf("resource name was missing from identifier") + return "", "", -1, errors.Errorf("resource name was missing from identifier") } vmNum, err := strconv.Atoi(vmNameParts[k8sLinuxVMAgentIndexArrayIndex]) if err != nil { - return "", "", -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, errors.Wrap(err, "Error parsing VM Name") } return vmNameParts[k8sLinuxVMAgentPoolNameIndex], vmNameParts[k8sLinuxVMAgentClusterIDIndex], vmNum, nil @@ -95,7 +96,7 @@ func K8sLinuxVMNameParts(vmName string) (poolIdentifier, nameSuffix string, agen func VmssNameParts(vmssName string) (poolIdentifier, nameSuffix string, err error) { vmssNameParts := vmssnameRegexp.FindStringSubmatch(vmssName) if len(vmssNameParts) != 3 { - return "", "", fmt.Errorf("resource name was missing from identifier") + return "", "", errors.New("resource name was missing from identifier") } return vmssNameParts[vmssAgentPoolNameIndex], vmssNameParts[vmssClusterIDIndex], nil @@ -105,7 +106,7 @@ func 
VmssNameParts(vmssName string) (poolIdentifier, nameSuffix string, err erro func WindowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIndex int, agentIndex int, err error) { vmNameParts := vmnameWindowsRegexp.FindStringSubmatch(vmName) if len(vmNameParts) != 4 { - return "", "", -1, -1, fmt.Errorf("resource name was missing from identifier") + return "", "", -1, -1, errors.New("resource name was missing from identifier") } poolPrefix = vmNameParts[k8sWindowsVMAgentPoolPrefixIndex] @@ -114,16 +115,12 @@ func WindowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIn poolIndex, err = strconv.Atoi(poolInfo[:3]) if err != nil { - return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, -1, errors.Wrap(err, "Error parsing VM Name") } poolIndex -= 900 agentIndex, _ = strconv.Atoi(poolInfo[3:]) fmt.Printf("%d\n", agentIndex) - if err != nil { - return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err) - } - return poolPrefix, acsStr, poolIndex, agentIndex, nil } @@ -131,7 +128,7 @@ func WindowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIn func WindowsVMSSNameParts(vmssName string) (poolPrefix string, acsStr string, poolIndex int, err error) { vmssNameParts := vmssnameWindowsRegexp.FindStringSubmatch(vmssName) if len(vmssNameParts) != 4 { - return "", "", -1, fmt.Errorf("resource name was missing from identifier") + return "", "", -1, errors.Errorf("resource name was missing from identifier") } poolPrefix = vmssNameParts[windowsVmssAgentPoolNameIndex] @@ -140,7 +137,7 @@ func WindowsVMSSNameParts(vmssName string) (poolPrefix string, acsStr string, po poolIndex, err = strconv.Atoi(poolInfo) if err != nil { - return "", "", -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, errors.Wrap(err, "Error parsing VM Name") } poolIndex -= 900 @@ -180,5 +177,5 @@ func GetK8sVMName(osType api.OSType, isAKS bool, nameSuffix, agentPoolName strin if osType == 
api.Windows { return fmt.Sprintf("%s%s%d%d", nameSuffix[:5], prefix, 900+agentPoolIndex, agentIndex), nil } - return "", fmt.Errorf("Failed to reconstruct VM Name") + return "", errors.Errorf("Failed to reconstruct VM Name") } diff --git a/pkg/armhelpers/utils/util_test.go b/pkg/armhelpers/utils/util_test.go index eca047f87b..e4feb42b3c 100644 --- a/pkg/armhelpers/utils/util_test.go +++ b/pkg/armhelpers/utils/util_test.go @@ -157,16 +157,42 @@ func Test_GetK8sVMName(t *testing.T) { nameSuffix, agentPoolName string agentPoolIndex, agentIndex int expected string + expectedErr bool }{ - {api.Linux, true, "35953384", "agentpool1", 0, 2, "aks-agentpool1-35953384-2"}, - {api.Windows, false, "35953384", "agentpool1", 0, 2, "35953k8s9002"}, + {api.Linux, true, "35953384", "agentpool1", 0, 2, "aks-agentpool1-35953384-2", false}, + {api.Windows, false, "35953384", "agentpool1", 0, 2, "35953k8s9002", false}, + {"macOS", false, "35953384", "agentpool1", 0, 2, "", true}, } { vmName, err := GetK8sVMName(s.osType, s.isAKS, s.nameSuffix, s.agentPoolName, s.agentPoolIndex, s.agentIndex) - if err != nil { - t.Fatalf("unexpected error: %s", err) + + if !s.expectedErr { + if err != nil { + t.Fatalf("unexpected error: %s", err) + } } if vmName != s.expected { t.Fatalf("vmName %s, expected %s", vmName, s.expected) } } } + +func Test_ResourceName(t *testing.T) { + s := "https://vhdstorage8h8pjybi9hbsl6.blob.core.windows.net/vhds/osdisks/disk1234.vhd" + expected := "disk1234.vhd" + r, err := ResourceName(s) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if r != expected { + t.Fatalf("resourceName %s, expected %s", r, expected) + } +} + +func Test_ResourceNameInvalid(t *testing.T) { + s := "https://vhdstorage8h8pjybi9hbsl6.blob.core.windows.net/vhds/osdisks/" + expectedMsg := "resource name was missing from identifier" + _, err := ResourceName(s) + if err == nil || err.Error() != expectedMsg { + t.Fatalf("expected error with message: %s", expectedMsg) + } +} diff --git 
a/pkg/helpers/helpers_test.go b/pkg/helpers/helpers_test.go index 4764212ac4..a9b6765c5f 100644 --- a/pkg/helpers/helpers_test.go +++ b/pkg/helpers/helpers_test.go @@ -1,16 +1,36 @@ package helpers -import "testing" +import ( + "bytes" + "crypto/x509" + "encoding/pem" + "math/rand" + "testing" -func TestPointerToBool(t *testing.T) { - boolVar := true - ret := PointerToBool(boolVar) - if *ret != boolVar { - t.Fatalf("expected PointerToBool(true) to return *true, instead returned %#v", ret) + "github.com/Azure/acs-engine/pkg/i18n" +) + +type ContainerService struct { + ID string `json:"id"` + Location string `json:"location"` + Name string `json:"name"` +} + +func TestJSONMarshal(t *testing.T) { + input := &ContainerService{} + result, _ := JSONMarshal(input, false) + expected := "{\"id\":\"\",\"location\":\"\",\"name\":\"\"}\n" + if string(result) != expected { + t.Fatalf("JSONMarshal returned unexpected result: expected %s but got %s", expected, string(result)) + } + result, _ = JSONMarshalIndent(input, "", "", false) + expected = "{\n\"id\": \"\",\n\"location\": \"\",\n\"name\": \"\"\n}\n" + if string(result) != expected { + t.Fatalf("JSONMarshal returned unexpected result: expected \n%sbut got \n%s", expected, result) } } -func TestIsRegionNormalized(t *testing.T) { +func TestNormalizeAzureRegion(t *testing.T) { cases := []struct { input string expectedResult string @@ -40,3 +60,107 @@ func TestIsRegionNormalized(t *testing.T) { } } } + +func TestPointerToBool(t *testing.T) { + boolVar := true + ret := PointerToBool(boolVar) + if *ret != boolVar { + t.Fatalf("expected PointerToBool(true) to return *true, instead returned %#v", ret) + } + + if IsTrueBoolPointer(ret) != boolVar { + t.Fatalf("expected IsTrueBoolPointer(*true) to return true, instead returned %#v", IsTrueBoolPointer(ret)) + } + + boolVar = false + ret = PointerToBool(boolVar) + if *ret != boolVar { + t.Fatalf("expected PointerToBool(false) to return *false, instead returned %#v", ret) + } + + 
if IsTrueBoolPointer(ret) != boolVar { + t.Fatalf("expected IsTrueBoolPointer(*false) to return false, instead returned %#v", IsTrueBoolPointer(ret)) + } +} + +func TestCreateSSH(t *testing.T) { + rg := rand.New(rand.NewSource(42)) + + expectedPublicKeyString := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCyx5MHXjJvJAx5DJ9FZNIDa/QTWorSF+Ra21Tz49DQWfdSESnCGFFVBh/MQUFGv5kCenbmqEjsWF177kFOdv1vOTz4sKRlHg7u3I9uCyyZQrWx4X4RdNk7eX+isQVjFXYw2W1rRDUrnK/82qVTv1f0gu1DV4Z7GoIa2jfJ0zBUY3IW0VN9jYaPVuwv4t5y2GwSZF+HBRuOfLfiUgt4+qVFOz4KwRaEBsVfWxlidlT3K3/+ztWpFOmaKIOjQreEWV10ZSo3f9g6j/HdMPtwYvRCtYStbFCRmcbPr9nuR84SAX/4f95KvBAKLnXwb5Bt71D2vAlZSW1Ylv2VbcaZ73+43EpyphYCSg3kOCdwsqE/EU+Swued82SguLALD3mNKbxHGJppFjz3GMyPpJuSH5EE1OANyPxABCwCYycKiNWbOPi3l6o4tMrASYRXi8l3l9JCvioUJ3bXXH6cDpcP4P6QgsuxhwVkUiECU+dbjJXK4gAUVuWKkMOdY7ITh82oU3wOWXbk8K3bdIUp2ylcHeAd2pekGMuaEKGbrXGRiBitCEjl67Bj5opQflgSmI63g8Sa3mKOPGRYMI5MXHMVj4Rns5JFHoENuImrlvrbLv3izAwO61vgN7iK26BwzO7jz92fNOHGviejNWYJyi4vZlq07153NZXP8D2xYTebh9hwHQ==\n" + + expectedPrivateKeyString := `-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAsseTB14ybyQMeQyfRWTSA2v0E1qK0hfkWttU8+PQ0Fn3UhEp +whhRVQYfzEFBRr+ZAnp25qhI7Fhde+5BTnb9bzk8+LCkZR4O7tyPbgssmUK1seF+ +EXTZO3l/orEFYxV2MNlta0Q1K5yv/NqlU79X9ILtQ1eGexqCGto3ydMwVGNyFtFT +fY2Gj1bsL+LecthsEmRfhwUbjny34lILePqlRTs+CsEWhAbFX1sZYnZU9yt//s7V +qRTpmiiDo0K3hFlddGUqN3/YOo/x3TD7cGL0QrWErWxQkZnGz6/Z7kfOEgF/+H/e +SrwQCi518G+Qbe9Q9rwJWUltWJb9lW3Gme9/uNxKcqYWAkoN5DgncLKhPxFPksLn +nfNkoLiwCw95jSm8RxiaaRY89xjMj6Sbkh+RBNTgDcj8QAQsAmMnCojVmzj4t5eq +OLTKwEmEV4vJd5fSQr4qFCd211x+nA6XD+D+kILLsYcFZFIhAlPnW4yVyuIAFFbl +ipDDnWOyE4fNqFN8Dll25PCt23SFKdspXB3gHdqXpBjLmhChm61xkYgYrQhI5euw +Y+aKUH5YEpiOt4PEmt5ijjxkWDCOTFxzFY+EZ7OSRR6BDbiJq5b62y794swMDutb +4De4itugcMzu48/dnzThxr4nozVmCcouL2ZatO9edzWVz/A9sWE3m4fYcB0CAwEA +AQKCAgEArQmNvWvm1LvHdsJIxhm3S6iJLNJN2ttVIrt3ljfCPGdXgg8qo7p1vh2X +WVMvoxJ/Pm7Z9pabPmao1PLeMtvooGZ+JRaTh2t4eKjyCki2egCfa/Qc2TiHqZEH +gKhl1mlHZDCOP2xdKkEV9V6K9mwU7YxrqOpmN3CIzQS5SpcmCAfYvU0Nyk/ZFZPE 
+NvUW6YGf2I1eCIlhCqCcOmm+wPGYVVHp0u7gpBkJoCnEgBCYXEO2NyJqmqSrFZJx +FuvURD1avvXLzrvmxYfdSYHHXBfq40ZdjJ1xvftg+lPyUzcctUDOY+8fcKZlv/UI +IhdZa45ehvGo+sqfE0fRWXhO6V9t9hdHwOq6ZEF2TtaA9qwPpZxiN5BN7G6Vi6Bm +u3HhSCHyEIdySi9/hX3fhDrhPN08NULLhpiKuSiFQesmUxFxWAprMpEyCdx0wva7 +5tZTQQfmVHCoWyVXWNMGTGBA/h8SWquoQWWhpG7UWCt0A0e0kcbegZTQPddxgITe +uqf6GadbajAr6Qwicf5yNH7bVPiD8dGWU07W3t4C0JyLGNLN34aT0OpleSck4dGp +V2UYylQNkf/EmxTY/CCPtNVVKng3CJ+jZvS4MOKvTi+vvsccd8x6BEo9xKetJhAA +SQeNDMu9tEPlZNHC972YNLb+LPm+feqgM2W/qcONtNhPw1INW+ECggEBAOmPO9jz +q6Gm8nNoALteuAD58pJ/suJTfhXbkGBOCG+hazlmk3rGzf9G/cK2jbS3ePoHw7b9 +oJcpoF2L1nUCdwxTJMUS+iyfVRQ4L8lRDC95x3vdBcdgFZUQgEx1L6hKuK5BpZOY +fyvIEmwpW7OpCOEqXeMOq3agR4//uptIyNCzyIPJz43H0dh6m4l+fYy53AOvDAeW +Xk0wERP6bolngkVnz1XbE43UNZqTFkGMF4gjJCbZ+UguOltsZXSPLA+ruRy3oYGn +LVo1ntAf8Ih94F43Y8Doe+VX3y2UJUqQa/ZFG2nu6KeuDWhwRS/XZQSkxrJ0bO2w +6eOCOEqggO7Qz7sCggEBAMP08Q1nPfmwdawEYWqopKeAMh00oMoX14u8UDmYejiH +uBegwzqgmOLfajFMJDnNXTyzxIRIndzrvXzvtFpSHkh29sOXXG9xlGyLWZGcxtzW +ivyTMw/pTg3yjN0qsleRB/o89VOYP2OG+1XjEcie6LNxXUN/wG5gUx8Wumb2c1hW +XBDM6cRbiSuJuINjscUgiHXKQddfu1cVRaNUgP1PGniKydCqdI2rUUQhziTmmj+o +q+dSv6nGRaK3uNhJrhpMlljxy1Mcr9zLP5FM1GjaF+VQ3zHNxDDbXl13rQPpDocw +vu9tAS/J1+vTgKzcHjKnudUWmoNahT3f4/86fc6XJgcCggEBAMK4ry3Goa5JUNPU +vt94LbJqsMlg+9PjxjgU8T7JcBEZpBqcIZL4EqClIEXpCyXC3XKfbJWwyOWeR9wW +DPtKzdQRsZM4qijvwe/0lCqkjqM6RY1IDVxXCEdaFY0pGk2V1nk5tADk4AmxaWKR +7KlR4VxQhSwbe+qP4Hn2vC5gtUQCz8bIR2muUY7JUcmFEslz3zGXDFF7FS4HSAW/ +Ac8+5AZXcS3kU14osXQo8yI82RWgLrDRhBqgp/i227Mc9qAuDEwb8OP2bEJMeBaO +umwhfiEuztTzPvBLnX8Thy+uTsRog12DWKcL3pPXHmevjcIcWqhHltVobOdIFwRo +4nW406cCggEBALmwZ6hy2Ai/DZL3B7VBn93WHicM0v0OwMN6rG8XrWHaQjmprrbk +rlv2qDOU2pMnpx25oBRWl7lcbtBweXBJdsbmbIoF6aL1d1ewaS0R6mQkrcoQVwfR +5pRS7uc56YwPNAcOMs+HazIOHCdUKGr7IrnASEeJTLmLb9j6+aJOEhl4pH+LHk5j +C0YFmKJxG2kYnhc4lVHZNrabwsS2dBEWH5hwtDOXAyGoYTb17dmL6ElAtb1b7aGc +8Cn0fSYAFAp53tLkNe9JNOE+fLtcmb/OQ2ybSRVxzmMZzX82w+37sDetmpFZsxEs +7P5dCwdDAx6vT+q8I6krYy2x9uTJ8aOOGYsCggEAAW9qf3UNuY0IB9kmHF3Oo1gN 
+s82h0OLpjJkW+5YYC0vYQit4AYNjXw+T+Z6WKOHOG3LIuQVC6Qj4c1+oN6sJi7re +Ey6Zq7/uWmYUpi9C8CbX1clJwany0V2PjGKL94gCIl7vaXS/4ouzzfl8qbF7FjQ4 +Qq/HPWSIC9Z8rKtUDDHeZYaLqvdhqbas/drqCXmeLeYM6Om4lQJdP+zip3Ctulp1 +EPDesL0rH+3s1CKpgkhYdbJ675GFoGoq+X21QaqsdvoXmmuJF9qq9Tq+JaWloUNq +2FWXLhSX02saIdbIheS1fv/LqekXZd8eFXUj7VZ15tPG3SJqORS0pMtxSAJvLw== +-----END RSA PRIVATE KEY----- +` + + translator := &i18n.Translator{ + Locale: nil, + } + + privateKey, publicKey, err := CreateSSH(rg, translator) + if err != nil { + t.Fatalf("failed to generate SSH: %s", err) + } + pemBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + } + pemBuffer := bytes.Buffer{} + pem.Encode(&pemBuffer, pemBlock) + + if string(pemBuffer.Bytes()) != expectedPrivateKeyString { + t.Fatalf("Private Key did not match expected format/value") + } + + if publicKey != expectedPublicKeyString { + t.Fatalf("Public Key did not match expected format/value") + } +} diff --git a/pkg/openshift/certgen/release39/config.go b/pkg/openshift/certgen/release39/config.go index 6d129c8dee..ea6167d238 100644 --- a/pkg/openshift/certgen/release39/config.go +++ b/pkg/openshift/certgen/release39/config.go @@ -27,12 +27,14 @@ type Config struct { // AzureConfig represents the azure.conf configuration type AzureConfig struct { - TenantID string - SubscriptionID string - AADClientID string - AADClientSecret string - ResourceGroup string - Location string + TenantID string + SubscriptionID string + AADClientID string + AADClientSecret string + ResourceGroup string + Location string + SecurityGroupName string + PrimaryAvailabilitySetName string } // Master represents an OpenShift master configuration diff --git a/pkg/openshift/certgen/release39/defaults.go b/pkg/openshift/certgen/release39/defaults.go index e5b26852fd..19d6ed7af9 100644 --- a/pkg/openshift/certgen/release39/defaults.go +++ b/pkg/openshift/certgen/release39/defaults.go @@ -30,12 +30,14 @@ func OpenShiftSetDefaultCerts(a 
*api.Properties, orchestratorName, clusterID str ClusterPassword: a.OrchestratorProfile.OpenShiftConfig.ClusterPassword, EnableAADAuthentication: a.OrchestratorProfile.OpenShiftConfig.EnableAADAuthentication, AzureConfig: AzureConfig{ - TenantID: a.AzProfile.TenantID, - SubscriptionID: a.AzProfile.SubscriptionID, - AADClientID: a.ServicePrincipalProfile.ClientID, - AADClientSecret: a.ServicePrincipalProfile.Secret, - ResourceGroup: a.AzProfile.ResourceGroup, - Location: a.AzProfile.Location, + TenantID: a.AzProfile.TenantID, + SubscriptionID: a.AzProfile.SubscriptionID, + AADClientID: a.ServicePrincipalProfile.ClientID, + AADClientSecret: a.ServicePrincipalProfile.Secret, + ResourceGroup: a.AzProfile.ResourceGroup, + Location: a.AzProfile.Location, + SecurityGroupName: fmt.Sprintf("%s-master-%s-nsg", orchestratorName, clusterID), + PrimaryAvailabilitySetName: fmt.Sprintf("compute-availabilityset-%s", clusterID), }, } diff --git a/pkg/openshift/certgen/release39/files.go b/pkg/openshift/certgen/release39/files.go index 7f2474cef8..c6ea18d53c 100644 --- a/pkg/openshift/certgen/release39/files.go +++ b/pkg/openshift/certgen/release39/files.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "os" "regexp" + "strconv" "strings" "text/template" @@ -112,6 +113,10 @@ func (c *Config) WriteMasterFiles(fs filesystem.Writer) error { h, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) return string(h), err }, + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, }).Parse(string(tb)) if err != nil { return err @@ -146,6 +151,10 @@ func (c *Config) WriteNodeFiles(fs filesystem.Writer) error { t, err := template.New("template").Funcs(template.FuncMap{ "QuoteMeta": regexp.QuoteMeta, + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, }).Parse(string(tb)) if err != nil { return err diff --git 
a/pkg/openshift/certgen/release39/templates/bindata.go b/pkg/openshift/certgen/release39/templates/bindata.go index 184ddf2182..4dda8a18e8 100644 --- a/pkg/openshift/certgen/release39/templates/bindata.go +++ b/pkg/openshift/certgen/release39/templates/bindata.go @@ -79,7 +79,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _masterEtcEtcdEtcdConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x53\x5d\x6f\xea\x38\x10\x7d\xcf\xaf\x40\xe6\x65\xf7\xa1\x90\xd2\xaf\xa5\x92\x1f\x8c\x33\x80\x85\x49\xb2\xb6\x03\x45\x55\x65\x51\x1a\x68\xb4\x14\x50\x12\xba\xad\x10\xff\xfd\xca\x49\x20\xb4\xd0\xab\xab\xdb\xfb\x96\xcc\x39\x33\x3e\x33\x73\x06\x14\x75\xb4\x4b\xfa\x80\x37\x9b\x4a\xad\x3f\x4e\xd2\x30\xae\x75\x97\x49\xba\x18\xbf\x84\x95\xed\xd6\xca\x08\x9c\x49\x05\xae\xf6\x01\x84\x0e\x04\x97\xf8\x39\x4d\x57\xc9\x6d\xbd\xbe\xd9\x54\xfe\x8a\x16\x4f\xe1\xdb\x3e\x97\xf9\x49\xc5\xfe\xbb\x26\xd3\x38\x5a\xcc\x2a\xdb\xed\x6d\xe3\xe2\x1f\x3b\xaf\xe2\x10\x45\xb4\xc3\x04\xae\xbf\x8e\xe3\xfa\x3c\x7a\xac\x87\xe9\xe4\xa9\x6e\x55\x33\x74\x48\x78\x06\x22\x54\x04\xa4\x4b\x7c\xd9\xf5\x94\xa6\x5e\xe0\x2a\x7c\x6e\xdb\x76\x51\xa8\x0b\x44\xa8\x16\x10\xa5\x99\xab\x40\x0c\x08\xc7\x57\x3b\x0c\x38\x50\xc5\x3c\x57\x2b\xd6\x07\x2f\x50\xb8\xb1\x87\x8a\x2e\x28\x67\xe0\xaa\xdf\xe8\xe3\xa6\x59\x28\xeb\x93\xbb\xbd\x3a\x89\xaf\x0e\xa2\x43\xc2\xcb\x00\xf5\x84\xc4\x96\x65\x55\xef\x27\xf3\xb5\x29\xfa\x90\xeb\x60\x2e\x53\x8c\x70\x4d\x9c\x01\x08\xc5\x24\x7c\x77\xb0\xbb\x82\x94\x07\x52\x81\xf8\x62\x95\x7f\xa4\xb6\x96\x8a\x28\xc0\x8b\xf0\xff\xd3\xb0\xf2\x7a\xe0\x62\xb3\xd7\xb3\xa2\xe9\xb3\xf3\x62\x1c\x0e\x93\xd4\x1b\x80\x18\xe1\xcf\x01\x2d\xc5\xe0\x38\xd8\x26\x9c\xb7\x08\xed\xe1\x55\xbc\x7c\x7b\x3f\x82\x7d\xe1\xdd\x8d\x70\xae\xa2\x9c\xe4\xf7\x97\x2b\x95\x60\x54\x69\x01\xd4\x73\xdb\xac\xa3\x69\x17\x68\x0f\xa3\xe9\x78\x9e\x84\x3b\x6b\x92\x40\x79\x9a\x7a\x7d\x9f\xe4\x5e\x13\xa0\xc0\x35\x5f\x18\xd9\x3b\x0e\xb8\xa4\xc5\x41\x0f\x1a\x18\xa5\xf1\x3a\x44\xb9\xd2\x7f\x03\x4f\x11\x6d\xda\x02\xd7\xd1\xad\x91\
x02\x89\x2f\x1b\xcd\xcb\xe6\xf5\x4d\xa3\x79\x6d\xbc\x92\x75\xfb\x50\x14\xc9\x9b\x5c\x4e\xa7\x87\xff\xba\x4d\x18\x0f\x04\xe8\x21\x61\x0a\xa3\x2b\xdb\xde\x3f\x9a\xe3\x02\xda\x02\x64\xb7\xbc\x0e\x74\x61\x1f\x91\x1c\xb3\xb6\xdd\x91\xa0\xf3\x23\x7c\x28\x98\x82\x92\x70\xea\x15\xe2\x94\xb8\x8d\x8c\xf8\x24\x9c\xac\xe3\x28\x7d\x2f\x9c\xae\x84\x71\x85\xa3\x29\xd1\x6d\xc6\x01\x9b\x83\xcf\x8f\x7e\x32\xae\x4d\xe2\x34\x67\x15\x3b\xa3\x20\x94\x99\x6c\xf7\xc3\xc0\xb2\xe8\xa7\xe4\x24\x8c\x5f\xc3\xb8\x2c\xd0\x83\xd1\x17\x94\xff\xc2\xf7\xc3\x95\x29\x2e\xf7\x9b\xcc\x3b\x31\x87\xf7\x6b\x2a\x33\xea\x4f\xa5\xe6\x8c\x13\x7a\x57\xe1\xa1\xda\x8c\x76\x42\x72\xc6\x2a\x05\x67\xb4\x23\xd5\x56\xf5\x7e\xbe\x9c\xcd\xa2\xc5\xac\x18\xb1\x03\xad\xa0\x83\x51\xfb\xa0\x29\xee\x75\xb4\x4f\x68\x8f\x74\x40\x73\x18\x80\x49\x37\x2f\xe4\x33\xc1\x59\x06\x2a\xac\x36\x8d\xe6\x59\xad\x0f\x9e\xf5\x7d\xe1\xb5\x3f\x59\xbe\x0f\xe6\x2e\x24\x46\x8f\xe3\x24\x9a\x20\xab\x6a\x55\xef\xc7\xeb\xf4\xf9\xa1\x9c\x6f\xb7\xb8\x7e\x94\x44\x2f\xab\x79\x88\xac\x1f\x01\x00\x00\xff\xff\x20\x54\xc1\x6d\x5d\x06\x00\x00") +var _masterEtcEtcdEtcdConf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x54\x5d\x6f\xea\x38\x10\x7d\xcf\xaf\x88\xcc\xcb\xee\xc3\x85\x94\xdb\x8f\xa5\x92\x1f\x8c\x33\x80\x85\x49\x52\xdb\x81\xa2\xaa\xb2\x28\x0d\x34\xda\x14\x50\x12\xba\xad\xba\xfd\xef\x2b\x27\x81\xd0\xc2\xae\x56\xba\xea\x1b\xcc\x1c\xcf\x9c\x39\x73\x26\xa0\xa8\xab\x3d\x32\x02\xfc\xfe\x6e\x37\x47\xb3\x2c\x8f\xd2\xe6\x60\x9d\xe5\xab\xd9\x73\x64\xff\x6d\x67\x4f\x51\x92\xdc\x6c\xd7\x79\x64\x7f\x7c\x58\x05\x9a\x33\xa9\xc0\xd3\x01\x80\xd0\xa1\xe0\xd2\xbc\xdc\xa4\xf1\x2a\xb7\xd1\x53\x9e\x6f\xb2\xeb\x56\x0b\xd9\xbf\xc5\xab\xc7\xe8\x75\x5f\x91\x05\x99\xed\xfc\xde\x94\x79\x1a\xaf\x96\x36\xba\x6e\xff\xfc\xc3\x41\xa7\xcb\xbb\x44\x11\xed\x32\x81\x5b\x2f\xb3\xb4\x95\xc4\x0f\xad\x28\x9f\x3f\xb6\xac\x46\x91\x9d\x10\x5e\x24\x11\xaa\x02\xd2\x23\x81\x1c\xf8\x4a\x53\x3f\xf4\x14\x3e\x73\x1c\xc7\x29\x0b\x0d\x80\x08\xd5\x05\xa2\x34\xf3\x14\x88\x31\xe1\xf8\x62\x97\x03\x0e\x54\x31\xdf\xd3\x8a\x8d\xc0\x0f\x15\x6e\xef\x53\xd5\x78\x94\x33\xf0\xd4\x2f\x0c\x78\xd5\x39\x1e\xb0\xa4\x3c\x22\xb7\x7b\xda\x12\x5f\x1c\x44\x27\x84\xd7\x01\xea\x0b\x89\x2d\xcb\x6a\xdc\xcd\x93\xad\x69\x72\x5f\x12\x64\x1e\x53\x8c\x70\x4d\xdc\x31\x08\xc5\x24\x7c\xdb\x2a\x76\x9d\x28\x0f\xa5\x02\x51\x57\x3f\x32\x0a\xc2\xdf\xd0\x4f\x4b\x45\x14\xe0\x55\xf4\xd7\xe9\xb4\xf2\x87\xe0\x61\xe3\x8e\x1f\x95\x42\x3f\xce\x2a\xed\x5c\x26\xa9\x3f\x06\x31\xc5\x5f\x03\x5a\x8a\xf1\x71\xb0\x47\x38\xef\x12\x3a\xc4\x9b\x74\xfd\xfa\x76\x94\x0e\x84\x7f\x3b\xc5\x25\x8b\x5a\xf6\x6f\xb4\x88\x54\x82\x51\xa5\x05\x50\xdf\xeb\xb1\xbe\xa6\x03\xa0\x43\x8c\x16\xb3\x24\x8b\x76\xce\x27\xa1\xf2\x35\xf5\x47\x01\x29\xad\x2c\x40\x81\x67\x7e\x61\xe4\xec\x30\xe0\x91\x2e\x07\x3d\x6e\x63\x94\xa7\xdb\x08\x95\x23\xdc\x84\xbe\x22\xda\xcc\x0b\x9e\xab\xbb\x53\x05\x12\x9f\xb7\x3b\xe7\x9d\xcb\xab\x76\xe7\xd2\x38\xae\x90\xe1\xbe\x2a\x52\x4e\xbf\x5e\x2c\x0e\xff\xeb\x1e\x61\x3c\x14\xa0\x27\x84\x29\x8c\x2e\x1c\x67\xdf\xb4\xcc\x0b\xe8\x09\x90\x83\xfa\xf8\xd0\x4f\xe7\x08\xe4\x9a\x7d\xee\x6e\x10\x9d\x1d\xe5\x27\x82\x29\xa8\x01\xa7\xba\x10
\xb7\xce\x3b\xc8\x90\xcf\xa2\xf9\x36\x8d\xf3\xb7\xea\x5e\x94\x30\x76\x71\x35\x25\xba\xc7\x38\x60\xf3\x3d\x29\xbf\x29\xf3\x59\x73\x9e\xe6\x25\xaa\x5a\x26\x05\xa1\x8c\xb2\x83\x4f\x82\x15\xd1\x2f\x8f\xb3\x28\x7d\x89\xd2\xba\xc0\x10\xa6\xff\x02\xf9\x33\x7a\x3b\x5c\x99\xe2\x72\xbf\xc9\x72\x12\x73\xbe\xff\x8f\x65\x01\xfd\x4f\xaa\x25\xe2\x04\xdf\x4d\x74\xc8\xb6\x80\x9d\xa0\x5c\xa0\x6a\xc2\x05\xec\x88\xb5\xd5\xb8\x4b\xd6\xcb\x65\xbc\x5a\x56\x12\xbb\xd0\x0d\xfb\x18\xf5\x0e\x86\xe2\x7e\x5f\x07\x84\x0e\x49\x1f\x34\x87\x31\x98\xe7\xa6\x43\xa9\x09\x2e\x5e\xa0\xca\x6a\x8b\x38\x29\x6a\x7d\xf2\x6c\x10\x08\xbf\xf7\xc5\xf2\x23\x30\x77\x21\x31\x7a\x98\x65\xf1\x1c\x59\x0d\xab\x71\x37\xdb\xe6\x4f\xf7\xb5\xbe\x83\xea\xb3\x80\xb2\xf8\x79\x93\x44\xc8\xfa\x27\x00\x00\xff\xff\x6b\x4e\x0b\x40\xe2\x06\x00\x00") func masterEtcEtcdEtcdConfBytes() ([]byte, error) { return bindataRead( @@ -119,7 +119,7 @@ func masterEtcOriginMasterHtpasswd() (*asset, error) { return a, nil } -var _masterEtcOriginMasterMasterConfigYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x79\x6f\x1b\xb9\x15\xff\x5f\x9f\x82\x08\x16\x48\x52\x74\x66\x24\x3b\xe7\x00\x45\xa1\xda\xce\x46\x58\x3b\x51\x65\xa7\xd8\xa2\x2e\x16\x14\xf9\x34\x62\xc4\x21\x27\x3c\x14\x2b\x6e\xbe\x7b\xc1\x63\x4e\x49\x49\x9a\x4d\xb1\xde\x45\xe2\xe1\x3b\xf8\xf8\xde\x8f\xef\x60\x30\x2d\x99\xd6\x4c\x8a\x33\x29\x56\xac\xc8\x47\x08\x55\xdc\x16\xac\xf3\x8d\xd0\xdf\x2c\xe3\xf4\x1c\x56\xd8\x72\xa3\xc3\x12\x42\xc4\x33\x58\x85\x0d\x93\xa2\x5e\x44\x08\x57\xec\x1f\xa0\x9c\xc6\x1c\x6d\x27\xcd\x32\x88\x6d\x8e\xfe\xf5\xef\xe6\x7b\xc3\x04\xcd\xfb\x8a\xc3\x8e\x0d\x87\x02\x2d\xad\x22\xa0\x5b\xdd\x08\x71\x56\x32\xa3\x73\x74\xff\xb9\xb3\xa8\xe0\x83\x05\xdd\x59\xf6\x6a\xdf\x6e\x41\x29\x46\xe1\x3b\x0d\xee\x18\xd8\x68\xea\x58\x38\x97\x74\xae\x40\x83\xf9\x3e\xed\x94\x69\xbc\xe4\x90\xa3\x15\xe6\x1a\x06\x9b\x46\x87\x4c\xfb\xa1\xf1\x4c\xb2\x02\xa1\xd7\x6c\x65\x52\x26\xb3\x59\x89\x0b\x98\x4b\xce\xc8\xee\x3b\x83\x72\x07\xc4\x3a\xce\x85\xe5\x5d\x3f\x27\xa8\xc4\x86\xac\xbd\xfe\xa9\x10\xd2\x78\x75\xbd\x40\x24\x68\x03\xbb\x1c\x31\xc7\xa2\xd3\x9e\x59\x14\xc4\x2e\x69\x54\x77\x64\x10\xda\x62\x6e\x21\x47\x0f\x8d\xb2\xf0\xb0\x43\x11\xb8\x84\xbc\x35\x27\xa1\x20\x18\xd0\x0e\x83\x14\x8b\x43\x70\x48\x1a\x94\xe4\xa8\x92\x54\x1f\x21\x2d\x5d\x14\x75\x0f\x31\xef\x81\x98\x1c\x39\x3b\x3a\xcb\x7a\xc3\xaa\xb7\x7e\x27\xee\xed\x78\x85\x19\xb7\x0a\x06\x7c\x21\x48\x1d\xe7\xc7\xf8\xe0\xa2\x50\x50\x60\x23\x55\xe7\x2e\x29\x79\xb7\x3b\xe3\x0c\x84\x99\x89\x95\x0c\xb6\x13\x50\xe6\x15\x73\xd1\x6f\x45\x92\x95\x92\xc2\x24\x9e\x3f\x25\xca\x78\xc6\x0d\xec\xbe\xc8\xb7\x81\xdd\x08\x57\xec\x12\xb6\xc0\x75\x3e\x4a\x5c\x6c\x07\xa1\xc6\xd6\xac\x5b\x73\xe2\x4d\x79\x0d\x98\x82\x8a\xc6\x78\xe3\xce\xa6\x39\xea\x68\x4e\x08\x6e\x8c\x88\x0c\xb2\x2c\xa5\x78\x83\xcb\x3a\x00\xc9\x11\xa3\x46\x01\x58\x46\xe1\xb0\xcb\x5c\xc1\x8a\xdd\xb5\x52\xbf\x26\x0b\x28\xa5\x81\xe4\xc2\xf1\x24\x7e\xb5\x50\xd2\x56\x81\x7d\x9f\xef\x67\x47\xf4\x8b\x56\x83\x72\x48\x39\xc6\xf9\x4e\x83\x1a\x11\x29\x8c\x92\x9c\x43\x27\x0a\xc0\x81
\xb4\x17\x82\x4b\xb2\x79\xe3\x01\xd7\xc0\x36\x29\xb1\x36\xa0\x92\x56\xd8\xa1\x45\x83\xda\x32\x02\xd7\xee\x2f\x51\x9c\x81\x8a\x97\x5d\xb3\x42\xd4\xee\xeb\x46\x33\xf2\x27\x81\xde\x38\xb0\x13\xc7\x01\x87\x8b\x5f\x67\xcb\x1c\x3d\xfc\xd3\xc3\x11\x91\x4a\x4f\x39\x97\x1f\x81\xbe\x55\xac\x60\xc2\x47\xf6\xd1\x5f\xd9\xe3\x2c\x9b\x9c\x3c\xbf\x4d\xc7\xfe\xff\xc9\xa3\xfc\x3f\xb7\x9f\x1e\x37\x24\x2e\x09\xe6\x6b\xa9\xcd\x60\xfd\xfe\x1e\xfd\xdd\x4a\x03\x57\x60\x30\x7a\xc4\x04\x85\x3b\x94\x5e\xf9\xe3\xa6\xb3\xb9\x46\xe3\xc7\xe9\xb5\x51\x4c\x14\xe8\xf3\xe7\x81\xe8\xc6\x2e\x41\x09\x30\xa0\x6f\x53\x1a\x72\xd2\xd7\x39\x6e\x53\xbd\x25\xb7\x29\xe1\xd6\x6d\x71\x9b\x7a\xbb\x8e\x8a\x7d\xc9\xd8\xf4\xe2\xce\xb8\x80\xf3\x60\xed\x6b\xa9\x8d\x8b\xfe\xbe\x9d\x4d\x18\x8f\x99\xd9\x57\x1b\x0f\xff\xbf\xe8\xf3\x87\xfa\xc6\xb3\x0f\xd8\x26\xcf\x4f\x6e\xd3\xd3\xc3\x31\x3b\xb2\xd1\x57\xbc\xd7\x48\xc5\x75\x2a\x74\x0b\xf5\x25\x13\x74\x4a\xa9\x02\xad\x73\x34\x4e\xfd\x7f\xf9\x8b\xf1\xd3\xd3\x48\x7b\x03\xe6\xa3\x54\x9b\x1c\x19\x52\x3d\x19\x81\x21\xb4\x9f\x9d\x08\xce\x51\xb8\x0c\xa9\x23\xb6\x89\xa0\x85\x79\x8f\xec\x85\x23\x4b\x03\xf3\x03\x1c\x0e\xea\x08\x59\xc5\xfd\xb5\x4d\xd0\xda\x98\x4a\xe7\x3e\x34\x07\x02\x92\x9f\x9c\x3e\x7f\xe9\xad\xbb\x36\x52\xe1\x02\xda\x03\xb6\x6e\x8f\xa4\x90\x60\xf2\x0e\x21\x65\xf2\x10\x63\xbf\x02\x3a\x2f\x5e\x3b\x2f\x0e\xd4\x74\x4b\xd9\x01\xb6\xae\x12\x5f\xfc\x5a\xcb\x56\x52\x95\xd8\xe4\x68\x76\x35\xfd\xf9\xe2\xb7\xf9\xe2\xe2\xd5\xec\xd7\x2c\x7c\xdc\xfc\x73\x7e\x91\xfc\x74\x4f\x64\x59\x49\x01\xc2\x7c\xce\x7f\xba\xdf\x06\x4d\xae\x63\xe1\xd8\x80\x36\x75\x33\xc0\x86\x95\xc5\x29\x67\x22\xdc\x85\x05\x14\x4c\x1b\xb5\xab\x9d\x95\x23\x2a\xc9\x06\x54\xa2\x22\xa1\x46\x92\x03\x52\xfe\x74\x3c\x1e\x8f\x42\xbd\x0a\x4e\x8e\xa5\xca\xf9\x86\x83\xd9\x0f\x3d\xc1\xc9\xd2\x0a\xca\xe1\x58\xd4\xa3\xe4\x97\x03\x3f\x60\x0a\xb1\xaf\xa4\x32\x39\x9a\x8c\x4f\x9e\x8e\x47\x6d\x6c\xba\x66\x39\x23\x70\xc5\x5c\xbe\x05\x35\x55\x85\x2d\x41\xd4\xfd\xa6\xb2\xc2\xb0\x12\x12\xd2\x69\x4b\x13\xc7\xad\x33\x0d\xc6\x30\x51\xe8\x74
\xf3\xc2\x85\x3e\xdb\x4e\x30\xaf\xd6\x78\xf2\x97\xa6\x6a\xeb\x10\xbb\x64\x89\xc9\x06\x04\xad\xa5\x1d\xbe\x4e\x7b\x0c\x25\x50\x86\x13\xb3\xab\xa0\xdd\xa1\xe2\x8c\xf8\xfe\x27\xdb\x0a\x9a\x76\x50\x56\x29\x69\xe4\xd2\xae\x62\x95\x94\x96\xba\x0a\xb8\x65\x4d\x69\x4d\xd0\x03\xfc\xc9\x2a\x78\xd0\xe1\xe8\xdb\xff\x20\x03\x43\x32\xcf\x14\xfe\x4c\x1d\xdd\xf1\xb7\x65\x61\xe0\x88\x98\x1a\x7c\x09\x61\xa2\x48\x5c\x84\x92\x95\xf3\x7e\x4f\xa7\xf4\xa5\x23\x0b\x01\xc9\xc2\x2d\x7e\x70\x50\xc1\x06\x76\xdf\x22\xbf\x81\xdd\x83\xff\xcb\x49\xcb\x88\x00\x2b\x1c\x3c\x9a\x85\xd9\x3c\x47\xf7\xf7\x5f\xab\x58\x1e\x57\xf4\x62\xcb\x7c\x7d\xbf\x61\x25\x48\x6b\x72\x24\x2c\xe7\x5f\xef\xbe\x22\x5a\x63\xc7\xd3\x05\xf4\x3e\xa4\x7b\x4c\x01\xd0\x9a\xac\x81\xda\x5e\x84\xea\x8d\x1b\x52\x00\x76\xd0\x74\xc0\xaf\x0d\x5f\xfa\x5e\xfb\x56\x39\x36\x08\xfa\x8d\xa4\x30\x97\xca\x2c\xb0\x28\x5c\xc3\xfc\xb0\x43\xbb\xb6\x4b\x01\xce\x57\xcf\x4f\xd2\x53\x9f\xe0\xb3\xc9\x33\x47\x77\x6d\x3a\x71\x92\xa1\x4d\x73\xe3\x56\x74\xae\x37\xdb\x03\x08\x62\x4d\xfd\xa5\xc1\xf1\x59\xec\xef\x84\x08\x4d\xd2\x60\x6e\xc2\x84\x40\xe5\xc8\x06\x84\xb9\xd9\x55\x4e\xf1\x37\x5c\x8a\x3f\x77\x79\xe2\xe1\x10\x5a\x5a\xe5\xf2\xdc\x93\xf1\x78\x14\xa7\x95\x5a\xeb\x37\x29\xf5\x42\x1f\x2a\x9d\xa3\x13\xaf\x61\xff\x30\xee\xb7\x98\x4c\x82\xd3\x9a\x74\x7e\x29\x65\xe5\xee\xff\x1f\x70\xdc\x67\xbf\xfb\xb8\xa7\x5e\xc3\xde\x59\xba\xa7\x1d\xf6\xb2\x5e\x61\xb8\x83\x11\x05\x73\xbb\xe4\x8c\xbc\x5b\x5c\xe6\xbd\xea\x7b\xb4\xcb\xca\x3b\xb5\xd9\x61\xd1\x5d\x37\x11\xda\x87\x36\x5f\xc7\x6c\x12\xdb\x8a\xb3\xd9\xf9\xc2\xe5\xf8\x74\x72\xf2\x22\x00\xf3\xc9\x1e\x4f\x6c\x00\x08\xa3\x6a\x9f\x15\x21\xd7\xc2\x06\x84\x5f\x82\x28\xcc\x3a\x47\x2f\x3b\x91\x9e\xcd\x3b\x3b\x45\x4d\xb1\xcb\xc9\x9c\x8b\x0e\x4b\x47\xab\xe7\xfe\x61\x23\xb4\xfe\x0a\xe8\x1a\x9b\xb6\x97\x4a\xe4\x56\x27\xda\x4b\xb6\x57\xad\x7f\xaa\xde\x75\x93\xfd\xc1\x0a\x6b\x0d\xe6\x07\x38\x38\x23\x52\x68\xc9\x21\x1b\xb9\xa9\x08\x7b\xa0\x36\x59\xb4\x04\xb3\x96\x34\x47\xd8\x1a\xd7\x9a\x30\x0a\xc2\x30\xb3\x9b\xc7\x54\xac\xf3\xd1\xfd
\x7d\x82\xd8\x0a\xa5\x17\x02\x2f\x39\x4c\xa7\xe7\x53\x6b\xd6\x8e\x2b\x00\xcd\xe7\xcb\x24\x4e\xdb\x53\x97\x85\xd1\xf4\x3c\x40\x73\x8d\x39\x07\x9f\x6b\xda\x17\x09\x2e\x0b\x26\x3a\xc3\x6f\x89\xab\x8a\x89\xe2\x2a\x9a\x41\x38\x66\xa5\x27\xf4\x8b\xc1\x91\xe7\x86\xd0\x86\xbc\xad\x40\xcc\xce\x67\x03\xd3\xeb\x51\x2a\xa4\xea\x73\x9f\xf9\x53\x6f\x60\x38\x7f\x3a\x9d\x9e\xc7\x3c\x7e\x1e\xb2\x7e\xcb\x7e\x0d\x44\xb9\x74\x78\x54\x24\x30\x74\xc5\x30\x2b\x3b\xcf\x08\x8c\x76\x5f\x3e\xb4\x5d\x36\x5f\x95\x82\x15\x28\x05\xf4\x5d\x9c\x3c\xbb\x8c\x56\xb0\x0f\x16\x7e\x73\xcb\xcd\xea\x90\xa7\x47\x84\x12\x33\xde\xa5\xfa\x85\xf8\x5d\x77\xc6\xd1\x81\xd6\xac\xa5\x62\x9f\xa0\x45\x92\x0f\x46\x5a\x32\xa2\xa4\x96\x2b\x23\x05\x67\xc2\x15\xd1\x32\x1b\x1e\xfc\x06\x04\x8e\x8e\xca\x3c\x4c\x4f\xb2\x46\x5f\xb3\x83\x91\x1b\x10\x3f\x48\xbb\xd7\xe5\xb1\x07\x82\xf6\x30\x76\xe9\xc6\x17\x54\x61\xad\x3f\x4a\x45\x87\x48\x6b\x80\xf5\x63\x81\xb6\x3a\x56\x6d\xd7\xc6\x5b\x42\x7b\x80\x7c\x7d\x33\xf7\x8b\xf3\x68\xe4\x01\x68\xc6\x22\x3a\xdd\x6f\x8f\x7f\x5c\x62\xad\x75\xfd\x5e\x2d\x1a\x06\xaf\xb9\xcd\xd2\x15\xbe\x9b\x16\x70\xed\x6a\x02\x75\x25\xa5\xae\x4a\x91\x1c\xd2\xa2\xd6\xa2\xbb\x18\xae\x8e\x3e\xde\xbf\x04\xb6\x44\x07\xbe\x74\x87\x4b\x07\x68\x0f\x88\xae\x09\xae\xaa\x6a\x7d\xe3\x96\x07\x66\xbc\x78\x56\x37\x03\x0d\x46\x0f\xb1\x3d\x1d\x8f\x47\x15\xb6\xda\xa1\xb0\x7d\x2e\x09\xa9\xaa\x1a\x8c\x4a\x4b\x29\x8d\x36\x0a\x57\x61\x86\x3a\x6a\x7c\x90\xab\x3b\xaf\xa6\x12\xcc\xc4\x4a\x61\x6d\x94\x25\xc6\xaa\xd0\x4a\x55\x98\xf4\x5e\x8b\x98\x63\xe9\xca\x5c\xaf\xb1\x02\xda\x3c\x51\x1e\x12\x1a\x55\x4a\xbe\x07\xd2\x49\xe8\x71\x50\x73\x0d\xdb\xb5\x7f\xa1\x92\x2a\x47\x42\x52\x48\x94\xe4\x90\xf6\xe6\xd9\xcc\x8d\x8e\xd6\x40\x3d\xd3\x44\x65\x8b\xf0\x9e\x77\x05\x5a\xe3\xa6\x4f\xec\xd3\x6e\xa0\xac\xdc\x7c\xd9\x34\x91\xc4\x2a\x66\x76\x53\xce\x25\xc1\x6e\xcb\x70\xe3\x88\x6e\x56\x62\xcf\xa9\xc7\x79\x76\x52\x13\x2f\xf1\x12\xb8\x9e\x83\x9a\x07\xe5\x39\x7a\x1a\x9e\xe4\x18\x1d\xca\x4d\xc6\xf5\x4f\x32\x79\x59\xff\x64\x7e\x75\xa4\xa4\x75\xa3\x5a\xeb\x03
\x6d\x97\x54\x96\xd8\xdd\xfe\x9b\x8b\xab\xf9\xe2\xed\xbb\x9b\x8b\xc5\x6c\x9e\x0a\x56\xb9\xf9\x3b\xd6\xe1\x29\x21\x6e\x3c\x68\xc5\xfc\x3f\x03\x04\x70\x2e\x5c\x8a\x06\x41\x40\xb7\xa5\xab\xc4\x02\x17\x40\x9b\xd7\xca\xa4\xf6\xb5\xff\xdd\xbf\x06\xfb\x8b\xed\xd6\x2b\x2e\x77\x5f\xb9\xe5\x95\x62\x5b\x6c\xe0\x97\xc1\x2b\x1e\x0e\x56\xb9\x7e\xcd\xd3\xeb\x39\xd7\x67\x83\xc8\x1c\xb7\xdf\x93\xf0\x3c\x5e\x40\x87\xd7\xc5\x7a\x3e\x39\xf8\x7e\x73\xf0\xa6\xef\xbf\xe6\x1c\x98\x6d\xb4\x9f\xa5\xeb\x61\xbe\x79\xea\x6d\x5e\x75\x86\x63\x4e\xe4\x0f\x27\x29\xf1\x5d\x04\x91\x9e\x89\x57\x9c\x15\x6b\x13\x6e\x62\xf3\x90\x1c\x07\xae\x7e\x52\xd9\x4a\x6e\xcb\xce\xbb\x08\xdd\x09\x5c\x32\xe2\x13\xaa\xcb\x16\x4c\x14\xa1\x3f\xa1\x31\xe5\xff\x37\x00\x00\xff\xff\x96\xdf\x75\xfd\x84\x1a\x00\x00") +var _masterEtcOriginMasterMasterConfigYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x7b\x6f\x1b\xb9\x11\xff\x5f\x9f\x82\x10\x0e\x48\x52\x74\x57\xb2\x9d\xe7\x02\x45\xa1\xda\xce\xc5\x38\x3b\x56\x65\xa7\x28\x50\x17\x01\x45\x8e\x56\x8c\xb8\xe4\x86\x0f\xc5\x8a\x2f\xdf\xbd\xe0\x63\x77\xb9\x7a\x24\x41\x9a\xc3\x39\x41\x22\x91\x33\xc3\xe1\xcc\x8f\xf3\x32\xa6\x15\xd3\x9a\x49\x71\x2a\xc5\x82\x95\xc5\x00\xa1\x9a\xdb\x92\x25\xdf\x11\xfa\x87\x65\x9c\x9e\xc1\x02\x5b\x6e\x74\x58\x42\x88\x78\x02\xab\xb0\x61\x52\x34\x8b\x08\xe1\x9a\xfd\x0b\x94\x93\x58\xa0\xf5\x51\xbb\x0c\x62\x5d\xa0\xff\xfc\xb7\xfd\xbe\x62\x82\x16\x7d\xc1\xe1\xc4\x96\x42\x81\x96\x56\x11\xd0\x9d\x6c\x84\x38\xab\x98\xd1\x05\x7a\xf8\x92\x2c\x2a\xf8\x68\x41\x27\xcb\x5e\xec\xf5\x1a\x94\x62\x14\x7e\x50\xe1\x44\xc1\x56\x52\xa2\xe1\x54\xd2\xa9\x02\x0d\xe6\xc7\xa4\x53\xa6\xf1\x9c\x43\x81\x16\x98\x6b\xd8\x3a\x34\x1a\x64\xd2\x77\x8d\x27\x92\x35\x08\xbd\x64\x0b\x93\x33\x39\xba\xa8\x70\x09\x53\xc9\x19\xd9\xfc\xa0\x53\xee\x81\x58\x47\x39\xb3\x3c\xb5\x73\x86\x2a\x6c\xc8\xd2\xcb\x9f\x08\x21\x8d\x17\xd7\x73\x44\x86\x56\xb0\x29\x10\x73\x24\x3a\xef\xa9\x45\x41\x6c\xb2\x56\x74\xc2\x83\xd0\x1a\x73\x0b\x05\x7a\x64\x94\x85\x47\xc9\x8e\xc0\x15\x14\x9d\x3a\x19\x05\xc1\x8
0\x26\x04\x52\xcc\xf6\xc1\x21\x6b\x51\x52\xa0\x5a\x52\x7d\x60\x6b\xee\xbc\xa8\x7b\x88\xf9\x00\xc4\x14\xc8\xe9\x91\x2c\xeb\x15\xab\xaf\xfd\x49\xdc\xeb\xf1\x1a\x33\x6e\x15\x6c\xd1\x05\x27\x25\xc6\x8f\xfe\xc1\x65\xa9\xa0\xc4\x46\xaa\xe4\x2d\x29\x79\xbf\x39\xe5\x0c\x84\xb9\x10\x0b\x19\x74\x27\xa0\xcc\x6b\xe6\xbc\xdf\xb1\x64\x0b\x25\x85\xc9\x3c\x7d\x4e\x94\xf1\x84\x2b\xd8\x7c\x95\x6e\x05\x9b\x01\xae\xd9\x25\xac\x81\xeb\x62\x90\x39\xdf\x6e\xb9\x1a\x5b\xb3\xec\xd4\x89\x2f\xe5\x0d\x60\x0a\x2a\x2a\xe3\x95\x3b\x9d\x14\x28\x91\x9c\x11\xdc\x2a\x11\x09\x64\x55\x49\xf1\x16\x57\x8d\x03\xb2\x03\x4a\x0d\x02\xb0\x8c\xc2\xe1\x94\xa9\x82\x05\xbb\xef\xb8\xfe\x9d\xcd\xa0\x92\x06\xb2\x73\x47\x93\xf9\xd5\x52\x49\x5b\x07\xf2\x5d\xba\x5f\xdd\xa6\x5f\xb4\x1a\x94\x43\xca\x21\xca\x77\x1a\xd4\x80\x48\x61\x94\xe4\x1c\x12\x2f\x00\x07\xd2\x3d\x08\x2e\xc9\xea\xad\x07\x5c\x0b\xdb\xac\xc2\xda\x80\xca\x3a\x66\x87\x16\x0d\x6a\xcd\x08\xdc\xb8\xff\x44\x79\x0a\x2a\x3e\x76\xcd\x4a\xd1\x98\x2f\xf5\x66\xa4\xcf\xc2\x7e\x6b\xc0\xc4\x8f\x5b\x14\xce\x7f\xc9\x91\x05\x7a\xf4\x97\x47\x03\x22\x95\x9e\x70\x2e\x3f\x01\xbd\x56\xac\x64\xc2\x7b\xf6\xf1\xdf\xd9\x93\xd1\xe8\xe8\xf8\xc5\x5d\x3e\xf6\x7f\x8f\x1e\x17\xbf\xdf\x7d\x7e\xd2\x6e\x71\x49\x30\x5f\x4a\x6d\xda\xf5\x87\x07\x54\x2b\x26\x0c\x1a\x06\x8a\x21\x7a\xfc\x4f\x2b\x0d\x5c\x81\xc1\xe8\x31\x13\x14\xee\x51\x7e\xe5\x2f\x9e\x5f\x4c\x35\x1a\x3f\xc9\x6f\x8c\x62\xa2\x7c\x82\x86\x4e\xc8\xdd\xe7\x27\x43\xf4\x3b\xfa\xe8\x78\xd0\x97\x2f\xed\x49\x2b\x3b\x07\x25\xc0\x80\xbe\xcb\x69\x08\x55\x5b\xba\xec\xa1\xb8\xcb\xf5\x9a\xdc\xe5\x84\x5b\x77\xde\x5d\xee\xd5\x3d\xc8\xf6\x7d\x77\xc8\xcf\xef\x8d\x43\x04\x0f\x97\x78\x23\xb5\x71\xf0\xf8\x86\xfa\xad\xd3\x77\xb5\xff\xfa\x69\xd1\x54\x3f\x7c\x8c\x37\xc1\x77\x5a\x6a\x8b\xec\xe8\xc5\xf1\x5d\x7e\xb2\xdf\xf1\x07\x0e\xfa\x86\xad\x5b\xae\xb8\x4e\x85\xee\xde\xcb\x9c\x09\x3a\xa1\x54\x81\xd6\x05\x1a\xe7\xfe\x4f\xf1\x72\xfc\xec\x24\xee\xbd\x05\xf3\x49\xaa\x55\x81\x0c\xa9\x9f\x0e\xc0\x10\xda\x0f\x71\x04\x17\x28\xbc\xa8\xdc\x6d\x76\xd1\xa4\x7b\x2b\xb
d\x6d\xcf\x1c\x49\xda\xb7\xb2\x87\xc2\xbd\x17\x84\xac\xe2\xfe\xed\xa7\xfe\x5a\x1a\x53\xeb\xc2\x79\x6c\xdb\x4d\x68\x58\x1c\x9f\xbc\x78\xd5\xf3\x91\x93\x79\x63\xa4\xc2\x25\x74\x97\xee\x5c\x11\xb7\x42\xe4\x2a\x92\x8d\x9c\xc9\x7d\x84\xfd\xd4\xea\x2c\x7b\xe3\x2c\xbb\x25\x26\xcd\x91\x7b\xc8\x52\x21\x3e\xab\x76\x9a\x2d\xa4\xaa\xb0\x29\xd0\xd5\xe4\xe6\xf6\x7c\xf6\xfe\x7a\x76\xfe\xeb\xfb\x77\xb3\xcb\xec\x97\x07\x22\xab\x5a\x0a\x10\xe6\x4b\xf1\xcb\xc3\x3a\x48\x70\x25\x10\xc7\x06\xb4\x69\xaa\x0b\xb6\x9d\xaa\x9c\x50\x26\xc2\xdb\x99\x41\xc9\xb4\x51\x9b\xc6\x5e\x05\xa2\x92\xac\x40\x65\x2a\x6e\x34\xa8\x72\xa0\x2a\x9e\x8d\xc7\xe3\x41\x48\x80\xc1\xce\x31\xf7\x39\x9b\x70\x30\xbb\x30\x20\x38\x9b\x5b\x41\x39\x1c\x42\x40\xe4\xfc\x3a\x08\xb6\x88\x02\x0e\x6a\xa9\x4c\x81\x8e\xc6\xc7\xcf\xc6\x83\xce\x27\xa9\x5a\x4e\x09\x5c\x33\x17\xc0\x41\x4d\x54\x69\x2b\x10\x4d\x01\xab\xac\x30\xac\x82\x8c\x24\x75\x6e\xe6\xa8\xf5\x48\x83\x31\x4c\x94\x3a\x5f\xbd\x74\x2e\x1f\xad\x8f\x30\xaf\x97\xf8\xe8\x6f\x6d\x19\xa0\x83\xcf\xb2\x39\x26\x2b\x10\xb4\xe1\x76\xb8\x3a\xe9\x11\x54\x40\x19\xce\xcc\xa6\x86\xee\x84\x9a\x33\xe2\x0b\xaa\xd1\x5a\xd0\x3c\x41\x57\xad\xa4\x91\x73\xbb\x88\x69\x57\x5a\xea\x52\xea\x9a\xb5\xb9\x3a\x43\x43\xfc\xd9\x2a\x18\x26\x14\x7d\xfd\x87\x23\x30\x64\xe4\x89\xc2\xbf\xb9\xdb\x77\xf4\x5d\x9e\xd9\x32\x44\x0c\x13\x3e\x27\x31\x51\x66\xce\x43\xd9\xc2\x59\xbf\x27\x53\xfa\x5c\x34\x0a\x0e\x19\x85\x17\x3d\xdc\x2b\x60\x05\x9b\xef\xe1\x5f\xc1\x66\xf8\x87\xdc\xb4\x8a\x08\xb0\xc2\xc1\xa3\x5d\xb8\x98\x16\x2e\x60\x7c\x2d\xf1\xa5\x31\xc2\xe1\x8b\x9e\xaf\x99\x2f\x1c\x6e\x59\x05\xd2\x9a\x02\x09\xcb\xf9\xb7\xcb\xba\x88\xda\x58\x4a\xa5\xc0\xde\x85\x76\x8f\x28\x00\x5b\x93\x25\x50\xdb\xf3\x54\x73\x70\xbb\x15\x00\x1e\x24\xed\xb1\x6f\x4b\x97\x7f\xd0\xbe\x06\x8f\x95\x87\x7e\x2b\x29\x4c\xa5\x32\x33\x2c\x4a\x57\x89\x3f\x4a\xf6\x6e\xec\x5c\x80\xb3\xd9\x8b\xe3\xfc\xc4\x07\xfd\xd1\xd1\x73\xb7\xef\xea\x7f\xe2\x38\x43\xfd\xe7\xfa\xb8\x68\x64\xaf\xb6\x07\x12\xc4\x5c\xfc\x5b\x8b\xe7\xd3\x58\x38\x0a\x11\xaa\xaf\xad\x86\x0c\x1
3\x02\xb5\xdb\x36\x20\xcc\xed\xa6\x76\x82\xbf\xe3\x71\xfc\x35\xa5\x89\x97\x43\x68\x6e\x95\x8b\x77\x4f\xc7\xe3\x41\x6c\x83\x1a\xa9\xdf\x25\xd4\x33\x7d\xac\x75\x81\x8e\xbd\x84\xdd\xcb\xb8\x4f\x31\xa8\x04\xa3\xb5\xe1\xfc\x52\xca\xda\xc5\x81\x3f\xe1\xba\xcf\xff\xef\xeb\x9e\x78\x09\x3b\x77\x49\x6f\xbb\x5d\x24\x7b\x81\xe1\x2d\x46\x14\x4c\xed\x9c\x33\xf2\x6e\x76\x59\xec\xcf\xc8\xfb\xcb\x34\x34\x2c\xba\x6c\xed\x20\x99\xbe\x3e\x11\x2a\x8c\x2e\x8c\xc7\x20\x13\x2b\x8f\xd3\x8b\xb3\x99\x0b\xfd\xf9\xd1\xf1\xcb\x80\xd3\xa7\x3b\x34\xb1\x46\x20\x8c\xaa\x5d\x52\x84\x5c\xa9\x1c\x00\x7f\x09\xa2\x34\xcb\x02\xbd\x4a\x1c\x7f\x31\x4d\x4e\x8a\x92\x62\x21\x34\x72\x16\xdb\xcf\x1d\xb5\x9e\xfa\x01\x4a\x68\x31\x14\xd0\x25\x36\x5d\xb9\x95\xc9\xb5\xce\xb4\xe7\xec\x5e\x5e\xff\x56\xbd\xd7\x27\xfb\x0d\x1c\xd6\x1a\xcc\xcf\xb2\xf7\x70\x44\xa4\xd0\x92\xc3\x68\xd8\x8f\x7c\xa5\xc2\x1e\xc9\x6d\xb8\xad\xc0\x2c\x25\x2d\x10\xb6\xc6\xd5\x2e\x8c\x82\x30\xcc\x6c\xa6\x31\x66\xeb\x62\xf0\xf0\x90\x21\xb6\x40\xf9\xb9\xc0\x73\x0e\x93\xc9\xd9\xc4\x9a\xa5\xa3\x0a\x48\xf4\x62\xb3\xd8\xe7\x4f\x5c\xb8\x46\x93\xb3\x80\xdd\x25\xe6\x1c\x7c\x30\xea\x66\x21\x5c\x96\x4c\x24\x6d\x77\x85\xeb\x9a\x89\xf2\x2a\xaa\x41\x38\x66\x95\xdf\xe8\x67\x8d\x03\x83\x8e\x50\xaf\x5c\xd7\x20\x2e\xce\x2e\xb6\x54\x6f\x9a\xb8\x10\xcb\xcf\xbc\x45\x73\xaf\x60\xb8\x7f\x3e\x99\x9c\xc5\x40\x7f\xd6\x37\x52\xc7\x76\x03\x44\xb9\xb8\x79\x90\x35\x10\xec\x63\xc7\xac\x4a\x06\x1a\x8c\xa6\x33\x18\x6d\xe7\xed\xb7\x5a\xc1\x02\x94\x02\xfa\x2e\xf6\xc0\x29\xa1\x15\xec\xa3\x85\xf7\x6e\xb9\x5d\xdd\xa6\xe9\x6d\x42\x85\x19\x4f\x77\xfd\x42\xfc\xde\x94\xd7\xd1\xa0\xd6\x2c\xa5\x62\x9f\x61\x1f\xd6\xbc\x9b\xf2\x8a\x11\x25\xb5\x5c\x18\x29\x38\x13\x2e\x0f\x57\x0e\x85\xa9\x25\x6e\x41\x60\x6f\xc1\xe1\xc8\x03\xfa\x78\xd4\xca\x1d\xee\x9a\x05\x21\x23\x57\x20\x7e\xf2\x89\x5e\x66\xef\x34\x87\x59\x10\xb4\x87\xcd\x4b\xd7\x2b\xa1\x1a\x6b\xfd\x49\x2a\xba\x8d\xd0\x16\x90\x3f\x17\xa0\x8b\x43\x69\x7c\x69\xbc\x26\xb4\x07\xe4\x37\xb7\x53\xbf\x38\x8d\x4a\xee\x81\x74\xcc\xce\x93\xdd\xfa\xfb\x0f\x8
b\xd8\x8d\xe8\x9f\x2c\x54\xc3\xd6\x74\xba\x5d\xba\xc2\xf7\x93\x12\x6e\x5c\x2a\xa2\x2e\x93\x35\xc9\x30\x6e\x87\xf0\xab\xb5\x48\x17\xc3\x43\xd4\x87\xcb\xa6\x40\x96\xe9\x40\x97\x6f\x70\xe5\x9e\x85\x87\x4e\xaa\x82\x4b\xe6\x5a\xdf\xba\xe5\x2d\x35\x5e\x3e\x6f\x6a\x90\x16\xe1\xfb\xc8\x9e\x8d\xc7\x83\x1a\x5b\xed\xe0\xda\x8d\x7f\x42\x00\xac\xb7\x3a\xb5\xb9\x94\x46\x1b\x85\xeb\xd0\xc2\x1d\x54\x3e\xf0\x35\x05\x5f\x9b\x71\x2e\xc4\x42\x61\x6d\x94\x25\xc6\xaa\x50\xc1\xd5\x98\xf4\xa6\x5f\xcc\x91\xa4\x3c\x37\x4b\xac\x80\xb6\x23\xd7\x7d\x4c\x83\x5a\xc9\x0f\x40\x92\x34\x11\xfb\x44\x57\x27\xde\xf8\x89\x9b\x54\x05\x12\x92\x42\xa6\x24\x87\xbc\xd7\x46\x8f\x5c\xe7\x6a\x0d\x34\x2d\x55\x14\x36\x0b\xf3\xc9\x2b\xd0\x1a\xb7\xe5\x69\x7f\xef\x16\xaa\xda\xb5\xb7\x6d\xed\x4a\xac\x62\x66\x33\xe1\x5c\x12\xec\x8e\x0c\xef\x91\xe8\x76\x25\x96\xba\x7a\x5c\x8c\x8e\x9b\xcd\x4b\x3c\x07\xae\xa7\xa0\xa6\x41\x78\x81\x9e\x85\x11\x23\xa3\xdb\x7c\x47\xe3\xe6\x27\x3b\x7a\xd5\xfc\x8c\xfc\xea\x40\x49\xeb\x3a\xc5\xce\x06\xda\xce\xa9\xac\xb0\x8b\x0d\xb7\xe7\x57\xd3\xd9\xf5\xbb\xdb\xf3\xd9\xc5\x34\x17\xac\x76\x6d\x7f\xcc\xf7\x13\x42\x5c\x77\xd2\xb1\xf9\x5f\x6b\x04\x70\xce\x5c\xa0\x07\x41\x40\x77\x09\xb1\xc2\x02\x97\x40\xdb\xe9\x6b\xd6\xd8\xda\x7f\xf6\xd3\x6d\xff\xec\xdd\x7a\xcd\xe5\xe6\x1b\x31\xa0\x56\x6c\x8d\x0d\xfc\xb6\x35\x95\xc4\x41\x2b\x57\x26\xfa\xfd\xa6\xcd\xf6\xb1\x22\x12\xc7\xe3\x77\x38\x3c\x8d\x67\xd0\x61\x5a\xda\xb4\x45\xbd\x51\x52\x17\x16\x9a\xa1\xd2\xd7\x9e\xfe\xee\xa4\x69\x4f\x8f\xa5\x7d\x6f\xdf\x0c\x17\xda\x59\x76\x3b\x71\xda\x6e\xb7\x22\x7d\xb8\x5a\x85\xef\x23\xaa\xf4\x85\x78\xcd\x59\xb9\x34\xe1\x69\xb6\x93\xf2\xd8\xf8\xf5\xa3\xcc\x5a\x72\x5b\x25\xf3\x19\xba\x11\xb8\x62\xc4\xc7\x5f\x17\x3e\x98\x28\x43\x19\x44\x63\x86\xf8\x5f\x00\x00\x00\xff\xff\x3e\x98\x14\x40\x65\x1b\x00\x00") func masterEtcOriginMasterMasterConfigYamlBytes() ([]byte, error) { return bindataRead( @@ -159,7 +159,7 @@ func masterEtcOriginMasterSchedulerJson() (*asset, error) { return a, nil } -var 
_masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x16\xa5\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x5a\x85\xda\x5a\x25\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x24\x15\xae\x79\xc9\x48\x0a\x00\x01\x00\x00\xff\xff\x58\x97\xb9\x86\x74\x00\x00\x00") +var _masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x96\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x52\x85\x1a\x85\xc2\xd2\xfc\x92\x54\x85\xda\x5a\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x84\x3a\xd7\xbc\x64\x4c\x65\x80\x00\x00\x00\xff\xff\xc1\xc1\xc9\xa5\x80\x00\x00\x00") func masterEtcOriginMasterSessionSecretsYamlBytes() ([]byte, error) { return bindataRead( @@ -199,7 +199,7 @@ func masterTmpAnsibleAnsibleSh() (*asset, error) { return a, nil } -var _masterTmpAnsibleAzureLocalMasterInventoryYml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\x6d\x6f\xdb\xc8\x11\xfe\xee\x5f\x31\x90\x03\xa8\x45\x23\xa9\x49\x0e\x68\x8f\x40\x3e\xf8\x7c\xcc\xd9\xb8\xb3\x65\x48\x4a\x91\xb4\x28\x88\x25\x77\x28\x6e\xbd\xda\xe1\xed\x2e\x25\xeb\x0c\xff\xf7\x62\xb8\x4b\xbd\xd0\x2f\xf1\x45\x9f\x44\x71\xe6\x99\x67\xe7\x7d\x35\x1a\x8d\x4e\x4e\xe1\xd3\xe5\x97\xab\x34\x81\x33\xb3\xf5\x95\x32\x4b\xc8\x51\xd3\x06\x94\x01\xa1\xf5\xa8\x10\xb5\x83\x4a\x38\x50\x7e\xe8\x60\x2d\x74\x83\x60\xb1\xd6\xa2\x40\x09\xf9\x16\x1c\x4a\x16\xf5\x15\x9e\x9c\x42\xfc\x50\x8d\xc6\x55\xaa\xf4\x2b\xe1\x3c\x5a\x57\x58\x55\xfb\xb1\xab\x60\x53\xa9\xa2\x02\xe5\xc0\x90\x07\x25\x51\xe8\xb7\xb0\x41\x70\x15\x35\x5a\x42\xa9\xee\xc0\x57\xc2\x8f\x4f\x34\x15\x42\x07\xe5\xe4\x04\xa0\x22\xe7\x1d\x7f\x01\x68\xdf\xf0\xf3\x09\xc0\x5a\xd8\xf8\xab\x30\x4e\xe5\x1a\xb3\x82\x8c\xc1\xc2\x2b\x32\x49\x90\x3c\x7a\x5b\x6f\x7d\x45\x26\x53\xc6\xa3\xad\x2d\x32\x38\x4c\x1a\x67\x27\xb9\x32\x93\xf0\xf2\xfd\xc9\xc9\xd1\x01\x32\xe5\x18\xd4\x0b\x65\xd0\xaa\x3f\x50\x26\xf0\x49\x68\x87\x3d\xa9\x42\x2b\x34\x3e\xcb\x95\x11\x76\x7b\x00\x4a\xc5\x23\xbc\x95\x58\x62\xe6\xc5\x32\x81\xc1\xfa\x5f\xe9\x6c\x7e\x39\xbd\x1e\x04\xa1\x53\xb8\x9e\x2e\xd2\x04\x16\x95\x72\x50\x54\xc2\x2c\xd1\xb1\x6b\x3f\x8c\xdf\xfd\xfd\x6f\x20\x8c\x0c\x21\x10\x5a\x43\x41\xab\x5c\x99\xd6\xf3\x9e\x40\x80\x53\x66\xa9\x91\x1d\xa2\x44\xae\x31\xc2\x85\x4f\x63\x24\x5a\x8e\x0f\x18\xb1\xc2\x03\x2a\x1b\xcc\xf9\x6c\x8e\x34\x46\x5a\x2c\xd0\xe3\x7b\x28\x54\x5b\x2c\xd5\x5d\x02\x83\xcb\xab\xb3\x5f\xd2\xec\x66\x96\x7e\xba\xfc\x32\x19\xbc\xa0\xb1\x87\xdd\x69\x2d\xbe\xde\xa4\xa3\x0d\xe6\xa3\x28\xf3\x92\xfa\x1a\xad\x6b\x63\xd9\x77\x15\x59\x5c\x66\x8d\xd5\x59\xcc\x11\x18\x1e\x51\x3a\xb0\xf4\xe6\xbe\xa0\x55\x4d\x06\x8d\x7f\x48\xde\xdc\x47\xc4\x87\x61\xcf\x6a\xc0\xc9\x24\x96\xa2\xd1\x3e\x73\x4d\x2e\x69\x25\x94\x49\x60\xb8\x48\xaf\x6e\x66\xd3\xcf\x8b\x74\x76\x79\x33\x36\xaa\x1e\x2b\x1a\x76\x01\x6b\x2b\x27\x7e\x6f\xc3\x16\x33\x39\x47\xf0\xdb\x1a\x3f\x2a\x53\x5a\xf1\x16\xf2\xc6\x73\x9a\x57\x62\x8d\xe0
\x09\xb4\x5a\x23\x6c\x94\xaf\xc0\xe2\x52\x91\x09\x62\x50\x92\x05\x43\x9b\x08\x97\x63\x21\x1a\x87\x40\x25\x68\x5c\x8a\x62\x0b\x16\x85\x23\xe3\x7a\xcc\x2d\x35\xcc\xdc\xa1\xc6\xc2\x13\xbb\xe2\x10\xb4\x7f\x4e\x7e\xe7\xbc\xdd\xbe\x2c\x7f\x0a\xd7\xd4\x96\x27\x6c\xaa\x2d\x78\x3e\x98\x72\x20\x40\xaa\xb2\x44\x8b\xc6\x83\x14\x5e\xb4\x47\x0c\x87\x53\x1e\x54\x9f\x58\x6d\x69\x85\xbe\xc2\xc6\x65\x86\x24\x1e\x58\xbc\x8f\x26\x87\x09\x0c\x83\xd5\x87\xce\xa1\x73\xf4\x70\xf6\x47\x63\x11\x5c\x8d\x85\x2a\x55\x11\x4c\xb1\x6f\xb8\xea\x51\x42\x77\x84\x9e\xb9\xf0\xf6\xe0\x80\x9e\x2c\x27\x5f\x6d\x69\xad\x24\xe7\xc8\x40\x30\x70\x96\x6b\xca\xfb\x49\xf7\x9c\xf2\xad\x32\x32\x81\x01\xe5\xff\xc3\xc2\xbf\x56\x69\x6f\x26\x13\x45\x41\x8d\xf1\xa1\x00\x86\xb3\xf4\x97\xcb\xf9\x62\xf6\x35\x9b\x2f\xa6\x33\xce\xd0\xb3\x7f\x7f\x9e\xa5\xd9\xd9\xf9\xf9\xf4\xf3\xf5\xe2\xfa\xec\x2a\xed\x87\xeb\xf5\x26\x6e\x71\xfb\x4d\x0b\xbf\xa6\x5f\xbf\xc3\xc0\xae\xfd\x25\x30\xe8\xe4\xbe\xc3\x15\x16\x85\x5e\x25\x30\x28\xc8\xe2\x78\xa3\x8c\xa4\x8d\x1b\x1b\xf4\x83\x27\x6a\x29\xfe\x72\x21\xac\x84\x82\x24\x86\x14\x8c\xf5\x31\x3e\x92\x39\x6f\x2c\x27\xa4\xde\xb6\x1d\xae\xd0\x0d\xd7\x31\x38\x2f\x3c\x82\xf0\x20\xb1\xd6\xb4\x5d\x71\xca\x7a\xb5\x42\x90\x84\x61\xf8\x84\x5a\xe4\xa6\x48\x12\x5d\x04\x0b\xfc\xd1\xa2\xe4\x32\xe5\xb7\xa1\x2f\x84\x62\xd5\x22\x47\xed\x40\xd4\xb5\x56\x28\xdb\x86\x6c\x51\xc8\x2d\xcb\xe6\x08\xbf\x37\x68\x15\xca\x08\x25\x96\x42\x19\xe7\x99\x03\xe3\xd4\xa4\x8c\x6f\xe7\x24\xb3\x08\xf3\x32\x92\x6b\x47\x5e\x10\xd2\x62\x9b\x13\xdd\x3a\xb0\x8d\x19\xc3\x99\x76\xf4\x36\xc2\xf1\xeb\x50\xe9\xed\xf0\x55\x85\x80\x36\xec\x10\x3b\x16\x0c\x4a\xa1\xb5\x83\x5c\x14\xb7\x03\x26\xf4\x8e\x79\x5a\xaa\xad\x12\x1e\xf5\x16\x36\x15\x5a\x04\xe1\x0e\xf1\x62\xb4\x76\x88\x9a\x96\x5c\x6f\xd1\x45\x63\x58\xb4\x3a\x1b\xc1\x33\xc7\x11\x48\xe5\x8a\xc6\x71\xfb\x04\x91\x13\x57\x7d\x19\xd1\xc2\x5c\xda\xdb\x63\x02\x92\x42\xd4\x62\x9f\x6b\x2d\x38\xf8\xf8\x11\x42\xb3\x6b\xdd\xbe\x6b\x72\x0c\x10\xb1\x6a\xb4\x25\x16\x1c\xd0\x12\x45\x3b\xb7\x19\xad\x0d\x97\x30
\x07\xca\xf1\xfc\x95\x5a\x56\xed\x78\x13\x9d\x4f\x9d\xb2\xbb\x28\x74\x76\x8f\x73\x66\x86\x6b\xe5\x94\x07\x2d\xd8\x9f\x7f\xa9\xc9\xb1\x9d\x2d\x37\x38\x81\x2b\x32\x0e\x3d\x90\x85\x37\xe4\x2b\xb4\x7f\x7d\x26\xd5\x43\xdb\xed\x0c\x24\xf0\xee\x1b\x25\x71\x28\xf9\x7c\xa7\x6c\x13\x37\x81\x41\x6d\xd1\xa1\x79\xd4\x72\xfa\x3d\x15\xef\x6a\xb2\xcc\x23\x0c\xda\xdd\xbc\x1c\xde\xcc\xa6\x57\xe9\xe2\x22\xfd\x3c\xcf\xd2\x2f\x37\xd3\xd9\x22\x9d\x65\x71\x84\x0e\xfb\xe6\x8f\x06\xb6\x71\x5e\x68\x9d\xc0\xc2\x36\xf8\x02\xcf\x60\xef\x75\x0b\xc1\xbe\x06\x33\x9e\x18\x09\x0c\xcf\xae\xe7\x97\x3f\xfd\x96\x66\x3f\xa7\x37\xbf\x4d\xbf\xb6\x83\x3a\xb2\xea\x56\x35\x87\x76\xad\x0a\xcc\x72\x4b\xb7\x7c\xbc\x23\x5a\xc7\xf0\x9d\x68\x21\xbc\xd0\xb4\x7c\x91\xda\xc1\x62\xd0\x67\xf9\x34\xcc\xde\xa3\xeb\x9d\xfb\x9e\xdc\x1c\xd0\x17\x92\x37\x12\x97\xc0\x7f\x06\x95\xf7\xb5\x4b\x26\x93\x8b\xe9\xbc\x6d\xeb\xc9\xfb\x0f\xff\xf8\x71\xf0\xdf\x70\x44\x8f\xab\x9a\x33\xaf\x7f\xc6\x97\x9c\xf9\x9c\xce\x73\xfb\x55\x27\x3f\x8a\xf2\xa3\x20\xff\x32\xd8\xb3\xdb\xd6\xd1\x62\xaa\x1c\x58\x94\x8d\x91\xc2\xf8\x30\xfc\x2d\xfe\xde\xa8\xd8\x33\x2b\x61\x24\x2f\xa2\x01\x0a\xdc\x2d\x6e\x20\x47\xbf\x41\x34\x47\x4b\xe9\xce\x7f\xa3\x18\x71\xee\x89\x53\xab\x96\x7c\xeb\x30\x12\xa6\xe7\x37\x3d\x3f\xe3\x9d\x58\xd5\x1a\xc3\x3a\xce\xb9\x74\x40\x77\x7e\x31\x9d\x2d\x38\xbf\x07\xfd\x94\x2d\xa8\xb8\xad\x55\x97\x83\x87\x5e\x3e\x9f\x9e\xff\x7a\x73\xb9\x78\x2e\x69\x1f\x29\xe6\xc2\x61\x74\x74\xa7\xfa\xd3\xd9\x3c\xe5\xf0\x7e\x53\x77\x4f\xb5\x53\x7d\xda\xc1\x3f\x53\x3b\x9b\x24\x96\xca\xe0\x37\x7b\x0e\x38\xda\xcf\x0e\x2f\xdc\x2d\x94\x6a\xb7\xfb\x07\xd9\xf1\x76\xa5\x41\x19\xa7\x64\x18\x76\x7d\x48\xb0\xa4\x11\x4a\x4b\xab\x27\x22\xb2\x51\x5a\x77\xd3\xac\xf1\x54\x53\xdd\x70\xda\xf0\xb2\xd7\xf0\x85\xe3\x49\xc4\xfe\x3e\xda\x46\x93\x47\xe3\x56\x99\xe5\xc1\xf0\x31\xcd\x2a\x47\xcb\xdb\xed\xc1\x3c\xe8\xa7\xdb\xfe\x46\xe3\xe2\xa5\x54\x58\xce\x14\x8f\xd6\x08\xcd\xf9\xf6\x98\x35\xdb\xdb\xe0\xd0\x1e\x5f\x82\xc2\x16\x18\x49\xaf\xa0\x1d\x6c\x7c\x73\x5a\x93\x92\x2d\x1f\x65\x0a
\xde\x24\x78\xdf\x70\x9e\x69\x95\xa2\xf0\x50\x2a\x23\x3b\xde\x3b\x53\xe1\xca\x09\x7c\x09\x5b\x91\xe9\x9e\xf8\xd9\x94\x6a\xd9\x26\x4a\x02\x13\xf4\xc5\x84\xda\x84\x9e\xec\x24\x5e\x99\xc3\x41\x78\x7f\xfd\x8d\x47\x68\x72\xad\x8a\x5d\xa7\x6e\xac\x4e\x60\xd7\x69\xee\xef\x61\x9c\xde\x05\xbf\x5c\xb5\x8a\x17\xe4\xda\xbd\x13\x1e\x1e\x92\x7f\xfe\xf0\xc3\x87\xc9\xd1\x7d\xeb\x00\x50\xd4\xea\x4f\x83\xed\x41\x5e\xd3\xf7\xe0\x34\xfe\xc3\x70\x4d\xa0\xc9\x2c\xd1\x82\x41\x94\xbc\x43\x39\x76\xf5\xa3\x20\x8e\x3e\x8c\x7f\x1c\xbf\x7f\x3f\x7a\x17\x6f\x4e\x43\x8b\x6d\x61\x90\x09\x09\xdf\xb5\x97\x2d\xfa\x48\x84\xd3\x67\xef\x2a\x7e\x8a\x3b\x77\x47\x65\x78\x72\x42\x14\x9b\xb5\xcb\x3c\x65\x21\x5a\xcf\xfe\xbb\xf0\xff\x00\x00\x00\xff\xff\xce\xfa\x8f\x78\x1a\x11\x00\x00") +var _masterTmpAnsibleAzureLocalMasterInventoryYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\x5d\x6f\xdb\x3a\xd2\xbe\xcf\xaf\x18\x38\x05\xfc\xbe\xd8\xda\xde\xb6\x07\xd8\x3d\x02\x7a\xe1\x93\xa3\x36\xc1\x69\xe2\xc0\x76\x16\xed\x2e\x16\x02\x25\x8e\x2c\x6e\x68\x8e\x4a\x52\x76\x7c\xb2\xf9\xef\x8b\x11\xe9\x2f\xe5\xa3\x41\x7d\x65\x59\x33\xcf\x3c\x9c\x6f\x7a\x30\x18\x9c\x9c\xc2\xa7\x8b\xaf\x97\x69\x02\x63\xb3\xf1\x95\x32\x0b\xc8\x51\xd3\x1a\x94\x01\xa1\xf5\xa0\x10\xb5\x83\x4a\x38\x50\xbe\xef\x60\x25\x74\x83\x60\xb1\xd6\xa2\x40\x09\xf9\x06\x1c\x4a\x16\xf5\x15\x9e\x9c\x42\xfc\x50\x8d\xc6\x55\xaa\xf4\x4b\xe1\x3c\x5a\x57\x58\x55\xfb\xa1\xab\x60\x5d\xa9\xa2\x02\xe5\xc0\x90\x07\x25\x51\xe8\xb7\xb0\x46\x70\x15\x35\x5a\x42\xa9\xee\xc0\x57\xc2\x0f\x4f\x34\x15\x42\x07\xe5\xe4\x04\xa0\x22\xe7\x1d\x7f\x01\x68\xdf\xf0\xf3\x09\xc0\x4a\xd8\xf8\xab\x30\x4e\xe5\x1a\xb3\x82\x8c\xc1\xc2\x2b\x32\x49\x90\x3c\x7a\x5b\x6f\x7c\x45\x26\x53\xc6\xa3\xad\x2d\x32\x38\x8c\x1a\x67\x47\xb9\x32\xa3\xf0\xf2\xfd\xc9\xc9\xd1\x01\x32\xe5\x18\xd4\x0b\x65\xd0\xaa\x3f\x51\x26\xf0\x49\x68\x87\x1d\xa9\x42\x2b\x34\x3e\xcb\x95\x11\x76\x73\x00\x4a\xc5\x23\xbc\xa5\x58\x60\xe6\xc5\x22\x81\xde\xea\x1f\xe9\x74\x76\x31\xb9\xea\x05\xa1\x53\xb8\x9a\xcc\xd3\x04\xe6\x95\x72
\x50\x54\xc2\x2c\xd0\xb1\x6b\x3f\x0c\xdf\xfd\xf5\x2f\x20\x8c\x0c\x21\x10\x5a\x43\x41\xcb\x5c\x99\xd6\xf3\x9e\x40\x80\x53\x66\xa1\x91\x1d\xa2\x44\xae\x31\xc2\x85\x4f\x63\x24\x5a\x8e\x0f\x18\xb1\xc4\x03\x2a\x6b\xcc\xf9\x6c\x8e\x34\x46\x5a\x2c\xd0\xe1\x7b\x28\x54\x5b\x2c\xd5\x5d\x02\xbd\x8b\xcb\xf1\xe7\x34\xbb\x9e\xa6\x9f\x2e\xbe\x8e\x7a\x2f\x68\xec\x61\x77\x5a\xf3\x6f\xd7\xe9\x60\x8d\xf9\x20\xca\xbc\xa4\xbe\x42\xeb\xda\x58\x76\x5d\x45\x16\x17\x59\x63\x75\x16\x73\x04\xfa\x97\xe3\xd9\x3c\x9d\x66\x93\x69\xfa\x39\xbb\x99\x7e\x19\xbc\xb9\x2f\x68\x59\x93\x41\xe3\x1f\x92\x37\xf7\x11\xe9\xa1\xdf\xb1\x16\xf4\x33\x89\xa5\x68\xb4\xcf\x5c\x93\x4b\x5a\x0a\x65\x12\xe8\xcf\xd3\xcb\xeb\xe9\xe4\x66\x9e\x4e\x2f\xae\x87\x46\xd5\x43\x45\xfd\x6d\xa0\xda\x8a\x89\xdf\xdb\x70\xc5\x0c\xce\x11\xfc\xa6\xc6\x8f\xca\x94\x56\xbc\x85\xbc\xf1\x9c\xde\x95\x58\x21\x78\x02\xad\x56\x08\x6b\xe5\x2b\xb0\xb8\x50\x64\x82\x18\x94\x64\xc1\xd0\x3a\xc2\xe5\x58\x88\xc6\x21\x50\x09\x1a\x17\xa2\xd8\x80\x45\xe1\xc8\xb8\x0e\x73\x4b\x0d\x33\x77\xa8\xb1\xf0\xc4\x2e\x38\x04\xed\x9e\x93\xdf\x39\x6f\x37\x2f\xcb\x9f\xc2\x15\xb5\x65\x09\xeb\x6a\x03\x9e\x0f\xa6\x1c\x08\x90\xaa\x2c\xd1\xa2\xf1\x20\x85\x17\xed\x11\xc3\xe1\x94\x07\xd5\x25\x56\x5b\x5a\xa2\xaf\xb0\x71\x99\x21\x89\x07\x16\xef\xa3\xc9\x7e\x02\xfd\x60\xf5\x61\xeb\xd0\x19\x7a\x18\xff\xd9\x58\x04\x57\x63\xa1\x4a\x55\x04\x53\xec\x1b\xae\x76\x94\xb0\x3d\x42\xc7\x5c\x78\x7b\x70\x40\x4f\x96\x93\xae\xb6\xb4\x52\x92\x73\xa3\x27\x18\x38\xcb\x35\xe5\xdd\x64\x7b\x4e\xf9\x56\x19\x99\x40\x8f\xf2\xff\x60\xe1\x5f\xab\xb4\x37\x93\x89\xa2\xa0\xc6\xf8\x90\xf8\xfd\x69\xfa\xf9\x62\x36\x9f\x7e\xcb\x66\xf3\xc9\x94\x6b\x60\xfc\xcf\x9b\x69\x9a\x8d\xcf\xce\x26\x37\x57\xf3\xab\xf1\x65\xda\x0d\xd7\xeb\x4d\xdc\xe2\xe6\x87\x16\xfe\x48\xbf\xfd\x84\x81\x5d\xdb\x4b\xa0\xb7\x95\xfb\x09\x57\x58\x14\x7a\x99\x40\xaf\x20\x8b\xc3\xb5\x32\x92\xd6\x6e\x68\xd0\xf7\x9e\xa8\xa5\xf8\xcb\xb9\xb0\x12\x0a\x92\x18\x52\x30\xd6\xc7\xf0\x48\xe6\xac\xb1\x9c\x90\x7a\xd3\x76\xb6\x42\x37\x5c\xc7\xe0\xbc\xf0\x08\xc2\x83
\xc4\x5a\xd3\x66\xc9\x29\xeb\xd5\x12\x41\x12\x86\xa1\x13\x6a\x91\x9b\x21\x49\x74\x11\x2c\xf0\x47\x8b\x92\xcb\x94\xdf\x86\xbe\x10\x8a\x55\x8b\x1c\xb5\x03\x51\xd7\x5a\xa1\x6c\x1b\xb1\x45\x21\x37\x2c\x9b\x23\x7c\x6f\xd0\x2a\x94\x11\x4a\x2c\x84\x32\xce\x33\x07\xc6\xa9\x49\x19\xdf\xce\x47\x66\x11\xe6\x64\x24\xd7\x8e\xba\x20\xa4\xc5\x26\x27\xba\x75\x60\x1b\x33\x84\xb1\x76\xf4\x36\xc2\xf1\xeb\x50\xe9\xed\xd0\x55\x85\x80\x36\xec\x10\x3b\x16\xf4\x4a\xa1\xb5\x83\x5c\x14\xb7\x3d\x26\xf4\x8e\x79\x5a\xaa\xad\x12\x1e\xf5\x06\xd6\x15\x5a\x04\xe1\x0e\xf1\x62\xb4\x76\x88\x9a\x16\x5c\x6f\xd1\x45\x43\x98\xb7\x3a\x6b\xc1\xb3\xc6\x11\x48\xe5\x8a\xc6\x71\xfb\x04\x91\x13\x57\x7d\x19\xd1\xc2\x3c\xda\xdb\x63\x02\x92\x42\xd4\x62\x9f\x6b\x2d\x38\xf8\xf8\x11\x42\xb3\x6b\xdd\xbe\x6b\x72\x0c\x10\xb1\x6a\xb4\x25\x16\x1c\xd0\x12\x45\x3b\xaf\x19\xad\x0d\x97\x30\x07\xca\xf1\xfc\x95\x5a\x54\xed\x58\x13\x5b\x9f\x3a\x65\x77\x51\xd8\xda\x3d\xce\x99\x29\xae\x94\x53\x1e\xb4\x60\x7f\xfe\x5f\x4d\x8e\xed\x6c\xb8\xc1\x09\x5c\x92\x71\xe8\x81\x2c\xbc\x21\x5f\xa1\xfd\xff\x67\x52\x3d\xb4\xdd\xad\x81\x04\xde\xfd\xa0\x24\x0e\x25\x9f\xef\x94\x6d\xe2\x26\xd0\xab\x2d\x3a\x34\x8f\x5a\x4e\xb7\xa7\xe2\x5d\x4d\x96\x79\x84\x01\xbb\x9b\x93\xfd\xeb\xe9\xe4\x32\x9d\x9f\xa7\x37\xb3\x2c\xfd\x7a\x3d\x99\xf2\x4c\x8c\xa3\xb3\xdf\x35\x7f\x34\xa8\x8d\xf3\x42\xeb\x04\xe6\xb6\xc1\x17\x78\x06\x7b\xaf\x5b\x04\xf6\x35\x98\xf1\xc4\x48\xa0\x3f\xbe\x9a\x5d\xfc\xf6\x25\xcd\x7e\x4f\xaf\xbf\x4c\xbe\xb5\xab\x40\x64\xb5\x5d\xd1\x1c\xda\x95\x2a\x30\xcb\x2d\xdd\xf2\xf1\x8e\x68\x1d\xc3\x6f\x45\x0b\xe1\x85\xa6\xc5\x8b\xd4\x0e\x56\x8f\x2e\xcb\xa7\x61\xf6\x1e\x5d\xed\xdc\xf7\xe4\xe6\x80\xbe\x90\xbc\x89\xb8\x04\xfe\xd5\xab\xbc\xaf\x5d\x32\x1a\x9d\x4f\x66\x6d\x5b\x4f\xde\x7f\xf8\xdb\xaf\xbd\x7f\x87\x23\x7a\x5c\xd6\x9c\x79\xdd\x33\xbe\xe4\xcc\xe7\x74\x9e\xdb\xab\xb6\xf2\x83\x28\x3f\x08\xf2\x2f\x83\x3d\xbb\x65\x1d\x2d\xa4\xca\x81\x45\xd9\x18\x29\x8c\x0f\xc3\xdf\xe2\xf7\x46\xc5\x9e\x59\x09\x23\x79\x01\x0d\x50\xe0\x6e\x71\x0d\x39\xfa\x35\xa2\x39\x5a\x46\x77
\xfe\x1b\xc4\x88\x73\x4f\x9c\x58\xb5\xe0\xdb\x86\x91\x30\x39\xbb\xee\xf8\x19\xef\xc4\xb2\xd6\x18\xd6\x70\xce\xa5\x03\xba\xb3\xf3\xc9\x74\xce\xf9\xdd\xeb\xa6\x6c\x41\xc5\x6d\xad\xb6\x39\x78\xe8\xe5\xb3\xc9\xd9\x1f\xd7\x17\xf3\xe7\x92\xf6\x91\x62\x2e\x1c\x46\x47\x6f\x55\x7f\x1b\xcf\x52\x0e\xef\x0f\x75\xf7\x54\xb7\xaa\x4f\x3b\xf8\x77\x6a\x67\x93\xc4\x52\x19\xfc\x61\xcf\x01\x47\xfb\xd9\xe1\x85\xbb\x85\x52\xed\x76\xfe\x20\x3b\xdc\x2c\x35\x28\xe3\x94\x0c\xc3\xae\x0b\x09\x96\x34\x42\x69\x69\xf9\x44\x44\xd6\x4a\xeb\xed\x34\x6b\x3c\xd5\x54\x37\x9c\x36\xbc\xec\x35\x7c\xd1\x78\x12\xb1\xbb\x8f\xb6\xd1\xe4\xd1\xb8\x51\x66\x71\x30\x7c\x4c\xb3\xcc\xd1\xf2\x76\x7b\x30\x0f\xba\xe9\xb6\xbf\xc9\xb8\x78\x19\x15\x96\x33\xc5\xa3\x35\x42\x73\xbe\x3d\x66\xcd\xf6\xd6\xd8\xb7\xc7\x97\x9f\xb0\x05\x46\xd2\x4b\x68\x07\x1b\xdf\x98\x56\xa4\x64\xcb\x47\x99\x82\x37\x09\xde\x37\x9c\x67\x5a\xa5\x28\x3c\x94\xca\xc8\x2d\xef\x9d\xa9\x70\xd5\x04\xbe\x7c\x2d\xc9\x6c\x9f\xf8\xd9\x94\x6a\xd1\x26\x4a\x02\x23\xf4\xc5\x88\xda\x84\x1e\xed\x24\x5e\x99\xc3\x41\x78\x7f\xed\x8d\x47\x68\x72\xad\x8a\x5d\xa7\x6e\xac\x4e\xe0\xfe\x1e\x6a\xcb\x9b\xc5\xae\xe5\xf4\x60\x98\xde\x05\xff\x5c\xb6\x00\xe7\xe4\xda\xfd\x13\x7a\xc9\xdf\x7f\xf9\xe5\xc3\x68\x7b\xdd\x82\xff\xc2\xf7\x86\x3c\xc2\xc3\x43\xd7\x84\xa8\xd5\xcf\xc2\x3f\x09\xfb\x9a\xde\x08\xa7\xf1\xdf\x87\x2b\x02\x4d\x66\x81\x16\x0c\xa2\xe4\x3d\xcb\x71\x38\x1e\x05\x7a\xf0\x61\xf8\xeb\xf0\xfd\xfb\xc1\xbb\x78\xbb\xea\x5b\x6c\x8b\x87\x4c\x28\x8a\x6d\x0b\xda\xa0\x8f\x44\x38\xc5\xf6\xee\xe4\xa7\xb8\x97\x6f\xa9\xf4\x4f\x4e\x88\x62\x43\x77\x99\xa7\x2c\x44\xf4\xd9\x7f\x1e\xfe\x17\x00\x00\xff\xff\x14\x17\x6b\xa6\x36\x11\x00\x00") func masterTmpAnsibleAzureLocalMasterInventoryYmlBytes() ([]byte, error) { return bindataRead( @@ -299,7 +299,7 @@ func masterTmpBootstrapconfigsMasterConfigYaml() (*asset, error) { return a, nil } -var _nodeEtcAzureAzureConf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2a\x49\xcd\x4b\xcc\x2b\xf1\x4c\xb1\x52\xa8\xae\x56\xd0\x73\xac\x2a\x2d\x4a\x75\xce\xcf\x4b\xcb\x4c\xd7\x0b\x81\xc8\xb8\x28\xd4\xd6\x72\x15\x97\x26\x15\x27\x17\x65\x16\x94\x64\xe6\xe7\x61\x53\x1b\x8c\x2c\x0f\xd6\x91\x98\x98\xe2\x9c\x93\x99\x8a\xdd\x68\x47\x47\x17\xa8\x24\xaa\xda\xe0\xd4\xe4\xa2\xd4\x12\x3c\xea\x21\x0a\xa0\x7a\x42\x88\x72\x7a\x51\x6a\x71\x7e\x69\x51\x72\xaa\x7b\x51\x7e\x69\x01\xa6\xd2\x20\x64\x69\x90\xfa\x9c\xfc\xe4\x44\x90\x37\x30\x95\xfa\x40\x65\x40\xaa\x00\x01\x00\x00\xff\xff\x69\xfe\xce\x7d\x37\x01\x00\x00") +var _nodeEtcAzureAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x90\x51\x8a\x83\x30\x10\x86\xdf\xf7\x14\x39\x81\x07\xd8\xb7\xa0\xb0\x2c\x2c\xcb\xb2\x7a\x81\x31\x4e\xcb\x40\x4c\xec\x38\x29\x58\xeb\xdd\x4b\xab\x0f\x51\xd3\x42\x9f\xbf\xff\xfb\x87\xf9\x05\x1d\x38\xf9\x6e\x3e\xd5\x38\xaa\x4c\x5f\x02\x63\xee\xdd\x81\x8e\x59\x35\x93\x42\x5d\xd5\x29\x78\x41\x35\x4d\x1f\x7d\xa8\x7b\xc3\xd4\x09\x79\x97\x72\xca\x98\xaf\x4c\x80\x26\xb7\x84\xe9\x53\x5a\x17\x0b\x4c\x3b\x25\x1a\x46\x79\xe1\xcd\x81\x8d\x5b\xbd\xf5\x1a\x63\xef\x03\x1b\xfc\x62\x1f\xba\xbd\xf2\x1f\xe3\xd8\xb3\xde\xc0\xfd\xdd\xbd\xf2\xb3\x90\xd5\x80\x68\x02\x93\x0c\x8f\x9a\x5f\x68\x31\xb1\xe1\x36\x12\xfb\x1d\x53\x0b\x3c\xe8\x33\x90\x85\x9a\x2c\xc9\x50\xa2\xa4\x8b\xfe\x9e\x66\xe3\xc6\x5b\x00\x00\x00\xff\xff\x34\x7d\xff\xaa\x01\x02\x00\x00") func nodeEtcAzureAzureConfBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf b/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf index 4762d60b51..c148e25cc3 100644 --- a/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf +++ b/pkg/openshift/certgen/release39/templates/master/etc/etcd/etcd.conf @@ -1,26 +1,26 @@ -ETCD_NAME={{ .Master.Hostname }} -ETCD_LISTEN_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_NAME={{ .Master.Hostname | shellQuote }} 
+ETCD_LISTEN_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_DATA_DIR=/var/lib/etcd/ #ETCD_WAL_DIR="" #ETCD_SNAPSHOT_COUNT=10000 ETCD_HEARTBEAT_INTERVAL=500 ETCD_ELECTION_TIMEOUT=2500 -ETCD_LISTEN_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 +ETCD_LISTEN_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_MAX_SNAPSHOTS=5 #ETCD_MAX_WALS=5 #ETCD_CORS= #[cluster] -ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 -ETCD_INITIAL_CLUSTER={{ .Master.Hostname }}=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} +ETCD_INITIAL_CLUSTER={{ print .Master.Hostname "=https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_INITIAL_CLUSTER_STATE=new ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster-1 #ETCD_DISCOVERY= #ETCD_DISCOVERY_SRV= #ETCD_DISCOVERY_FALLBACK=proxy #ETCD_DISCOVERY_PROXY= -ETCD_ADVERTISE_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 +ETCD_ADVERTISE_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_STRICT_RECONFIG_CHECK="false" #ETCD_AUTO_COMPACTION_RETENTION="0" #ETCD_ENABLE_V2="true" diff --git a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml index 9a62edd9dc..d522e1d843 100644 --- a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml +++ b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/master-config.yaml @@ -60,13 +60,13 @@ controllers: '*' corsAllowedOrigins: - (?i)//127\.0\.0\.1(:|\z) - (?i)//localhost(:|\z) -- (?i)//{{ QuoteMeta (index .Master.IPs 0).String }}(:|\z) +- {{ print "(?i)//" (QuoteMeta (index .Master.IPs 0).String) "(:|\\z)" | quote }} - (?i)//kubernetes\.default(:|\z) - 
(?i)//kubernetes\.default\.svc\.cluster\.local(:|\z) - (?i)//kubernetes(:|\z) -- (?i)//{{ QuoteMeta .ExternalMasterHostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .ExternalMasterHostname) "(:|\\z)" | quote }} - (?i)//openshift\.default(:|\z) -- (?i)//{{ QuoteMeta .Master.Hostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .Master.Hostname) "(:|\\z)" | quote }} - (?i)//openshift\.default\.svc(:|\z) - (?i)//kubernetes\.default\.svc(:|\z) - (?i)//172\.30\.0\.1(:|\z) @@ -80,14 +80,14 @@ etcdClientInfo: certFile: master.etcd-client.crt keyFile: master.etcd-client.key urls: - - https://{{ .Master.Hostname }}:2379 + - {{ print "https://" .Master.Hostname ":2379" | quote }} etcdStorageConfig: kubernetesStoragePrefix: kubernetes.io kubernetesStorageVersion: v1 openShiftStoragePrefix: openshift.io openShiftStorageVersion: v1 imageConfig: - format: IMAGE_PREFIX/IMAGE_TYPE-${component}:${version} + format: MASTER_OREG_URL-${component}:${version} latest: false imagePolicyConfig: internalRegistryHostname: docker-registry.default.svc:5000 @@ -119,7 +119,7 @@ kubernetesMasterConfig: cloud-config: - "/etc/azure/azure.conf" masterCount: 1 - masterIP: {{ (index .Master.IPs 0).String }} + masterIP: {{ (index .Master.IPs 0).String | quote }} podEvictionTimeout: null proxyClientInfo: certFile: master.proxy-client.crt @@ -142,7 +142,7 @@ masterClients: contentType: application/vnd.kubernetes.protobuf qps: 300 openshiftLoopbackKubeConfig: openshift-master.kubeconfig -masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} +masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} networkConfig: clusterNetworkCIDR: 10.128.0.0/14 clusterNetworks: @@ -154,7 +154,7 @@ networkConfig: networkPluginName: redhat/openshift-ovs-subnet serviceNetworkCIDR: 172.30.0.0/16 oauthConfig: - assetPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}/console/ + assetPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port 
"/console/" | quote }} grantConfig: method: auto identityProviders: @@ -166,8 +166,8 @@ oauthConfig: provider: apiVersion: v1 kind: OpenIDIdentityProvider - clientID: {{ .AzureConfig.AADClientID }} - clientSecret: {{ .AzureConfig.AADClientSecret }} + clientID: {{ .AzureConfig.AADClientID | quote }} + clientSecret: {{ .AzureConfig.AADClientSecret | quote }} claims: id: - sub @@ -178,8 +178,8 @@ oauthConfig: email: - email urls: - authorize: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/authorize - token: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/token + authorize: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/authorize" | quote }} + token: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/token" | quote }} {{- end}} - name: Local password challenge: true @@ -190,8 +190,8 @@ oauthConfig: file: /etc/origin/master/htpasswd kind: HTPasswdPasswordIdentityProvider masterCA: ca-bundle.crt - masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} - masterURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} + masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} + masterURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} sessionConfig: sessionMaxAgeSeconds: 3600 sessionName: ssn @@ -225,7 +225,7 @@ serviceAccountConfig: publicKeyFiles: - serviceaccounts.public.key servingInfo: - bindAddress: 0.0.0.0:{{ .Master.Port }} + bindAddress: {{ print "0.0.0.0:" .Master.Port | quote }} bindNetwork: tcp4 certFile: master.server.crt clientCA: ca.crt diff --git a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml index e958ea1d0f..d440e83292 100644 --- a/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml +++ 
b/pkg/openshift/certgen/release39/templates/master/etc/origin/master/session-secrets.yaml @@ -1,5 +1,5 @@ apiVersion: v1 kind: SessionSecrets secrets: -- authentication: "{{ .AuthSecret }}" - encryption: "{{ .EncSecret }}" +- authentication: {{ .AuthSecret | quote }} + encryption: {{ .EncSecret | quote }} diff --git a/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml b/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml index 790f0c7d52..a98b70883f 100644 --- a/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml +++ b/pkg/openshift/certgen/release39/templates/master/tmp/ansible/azure-local-master-inventory.yml @@ -19,7 +19,7 @@ localmaster: openshift_web_console_image_name: "IMAGE_TYPE-web-console" openshift_web_console_version: "vVERSION" - oreg_url_master: 'IMAGE_PREFIX/IMAGE_TYPE-${component}:${version}' + oreg_url_master: 'MASTER_OREG_URL-${component}:${version}' openshift_master_default_subdomain: 'TEMPROUTERIP.nip.io' # FIXME @@ -92,8 +92,8 @@ localmaster: config_base: /etc/origin/ examples_content_version: "vSHORT_VER" master: - public_console_url: "https://{{ .ExternalMasterHostname }}:8443/console" - public_api_url: "https://{{ .ExternalMasterHostname }}:8443" + public_console_url: {{ print "https://" .ExternalMasterHostname ":8443/console" | quote }} + public_api_url: {{ print "https://" .ExternalMasterHostname ":8443" | quote }} etcd_urls: ["https://HOSTNAME:2379"] #FIXME: No longer needed as of openshift-ansible-3.9.22-1 but we're not on that version yet node: nodename: 'HOSTNAME' diff --git a/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf b/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf index 5241e2afa2..870b2aad1d 100644 --- a/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf +++ b/pkg/openshift/certgen/release39/templates/node/etc/azure/azure.conf @@ -1,7 +1,9 @@ 
-tenantId: {{ .AzureConfig.TenantID }} -subscriptionId: {{ .AzureConfig.SubscriptionID }} -aadClientId: {{ .AzureConfig.AADClientID }} -aadClientSecret: {{ .AzureConfig.AADClientSecret }} -aadTenantId: {{ .AzureConfig.TenantID }} -resourceGroup: {{ .AzureConfig.ResourceGroup }} -location: {{ .AzureConfig.Location }} +tenantId: {{ .AzureConfig.TenantID | quote }} +subscriptionId: {{ .AzureConfig.SubscriptionID | quote }} +aadClientId: {{ .AzureConfig.AADClientID | quote }} +aadClientSecret: {{ .AzureConfig.AADClientSecret | quote }} +aadTenantId: {{ .AzureConfig.TenantID | quote }} +resourceGroup: {{ .AzureConfig.ResourceGroup | quote }} +location: {{ .AzureConfig.Location | quote }} +securityGroupName: {{ .AzureConfig.SecurityGroupName | quote }} +primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName | quote }} diff --git a/pkg/openshift/certgen/unstable/config.go b/pkg/openshift/certgen/unstable/config.go index e5c9b1c716..f7fe5d8739 100644 --- a/pkg/openshift/certgen/unstable/config.go +++ b/pkg/openshift/certgen/unstable/config.go @@ -27,12 +27,14 @@ type Config struct { // AzureConfig represents the azure.conf configuration type AzureConfig struct { - TenantID string - SubscriptionID string - AADClientID string - AADClientSecret string - ResourceGroup string - Location string + TenantID string + SubscriptionID string + AADClientID string + AADClientSecret string + ResourceGroup string + Location string + SecurityGroupName string + PrimaryAvailabilitySetName string } // Master represents an OpenShift master configuration diff --git a/pkg/openshift/certgen/unstable/defaults.go b/pkg/openshift/certgen/unstable/defaults.go index cfdc6aafe2..a6382718e4 100644 --- a/pkg/openshift/certgen/unstable/defaults.go +++ b/pkg/openshift/certgen/unstable/defaults.go @@ -30,12 +30,14 @@ func OpenShiftSetDefaultCerts(a *api.Properties, orchestratorName, clusterID str ClusterPassword: a.OrchestratorProfile.OpenShiftConfig.ClusterPassword, EnableAADAuthentication: 
a.OrchestratorProfile.OpenShiftConfig.EnableAADAuthentication, AzureConfig: AzureConfig{ - TenantID: a.AzProfile.TenantID, - SubscriptionID: a.AzProfile.SubscriptionID, - AADClientID: a.ServicePrincipalProfile.ClientID, - AADClientSecret: a.ServicePrincipalProfile.Secret, - ResourceGroup: a.AzProfile.ResourceGroup, - Location: a.AzProfile.Location, + TenantID: a.AzProfile.TenantID, + SubscriptionID: a.AzProfile.SubscriptionID, + AADClientID: a.ServicePrincipalProfile.ClientID, + AADClientSecret: a.ServicePrincipalProfile.Secret, + ResourceGroup: a.AzProfile.ResourceGroup, + Location: a.AzProfile.Location, + SecurityGroupName: fmt.Sprintf("%s-master-%s-nsg", orchestratorName, clusterID), + PrimaryAvailabilitySetName: fmt.Sprintf("compute-availabilityset-%s", clusterID), }, } diff --git a/pkg/openshift/certgen/unstable/files.go b/pkg/openshift/certgen/unstable/files.go index b86cc84c71..6fd5525989 100644 --- a/pkg/openshift/certgen/unstable/files.go +++ b/pkg/openshift/certgen/unstable/files.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "os" "regexp" + "strconv" "strings" "text/template" @@ -112,6 +113,10 @@ func (c *Config) WriteMasterFiles(fs filesystem.Writer) error { h, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) return string(h), err }, + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, }).Parse(string(tb)) if err != nil { return err @@ -146,6 +151,10 @@ func (c *Config) WriteNodeFiles(fs filesystem.Writer) error { t, err := template.New("template").Funcs(template.FuncMap{ "QuoteMeta": regexp.QuoteMeta, + "quote": strconv.Quote, + "shellQuote": func(s string) string { + return `'` + strings.Replace(s, `'`, `'\''`, -1) + `'` + }, }).Parse(string(tb)) if err != nil { return err diff --git a/pkg/openshift/certgen/unstable/templates/bindata.go b/pkg/openshift/certgen/unstable/templates/bindata.go index 0046d9424c..ed0ded62b7 100644 --- 
a/pkg/openshift/certgen/unstable/templates/bindata.go +++ b/pkg/openshift/certgen/unstable/templates/bindata.go @@ -78,7 +78,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _masterEtcEtcdEtcdConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x53\x5d\x6f\xea\x38\x10\x7d\xcf\xaf\x40\xe6\x65\xf7\xa1\x90\xd2\xaf\xa5\x92\x1f\x8c\x33\x80\x85\x49\xb2\xb6\x03\x45\x55\x65\x51\x1a\x68\xb4\x14\x50\x12\xba\xad\x10\xff\xfd\xca\x49\x20\xb4\xd0\xab\xab\xdb\xfb\x96\xcc\x39\x33\x3e\x33\x73\x06\x14\x75\xb4\x4b\xfa\x80\x37\x9b\x4a\xad\x3f\x4e\xd2\x30\xae\x75\x97\x49\xba\x18\xbf\x84\x95\xed\xd6\xca\x08\x9c\x49\x05\xae\xf6\x01\x84\x0e\x04\x97\xf8\x39\x4d\x57\xc9\x6d\xbd\xbe\xd9\x54\xfe\x8a\x16\x4f\xe1\xdb\x3e\x97\xf9\x49\xc5\xfe\xbb\x26\xd3\x38\x5a\xcc\x2a\xdb\xed\x6d\xe3\xe2\x1f\x3b\xaf\xe2\x10\x45\xb4\xc3\x04\xae\xbf\x8e\xe3\xfa\x3c\x7a\xac\x87\xe9\xe4\xa9\x6e\x55\x33\x74\x48\x78\x06\x22\x54\x04\xa4\x4b\x7c\xd9\xf5\x94\xa6\x5e\xe0\x2a\x7c\x6e\xdb\x76\x51\xa8\x0b\x44\xa8\x16\x10\xa5\x99\xab\x40\x0c\x08\xc7\x57\x3b\x0c\x38\x50\xc5\x3c\x57\x2b\xd6\x07\x2f\x50\xb8\xb1\x87\x8a\x2e\x28\x67\xe0\xaa\xdf\xe8\xe3\xa6\x59\x28\xeb\x93\xbb\xbd\x3a\x89\xaf\x0e\xa2\x43\xc2\xcb\x00\xf5\x84\xc4\x96\x65\x55\xef\x27\xf3\xb5\x29\xfa\x90\xeb\x60\x2e\x53\x8c\x70\x4d\x9c\x01\x08\xc5\x24\x7c\x77\xb0\xbb\x82\x94\x07\x52\x81\xf8\x62\x95\x7f\xa4\xb6\x96\x8a\x28\xc0\x8b\xf0\xff\xd3\xb0\xf2\x7a\xe0\x62\xb3\xd7\xb3\xa2\xe9\xb3\xf3\x62\x1c\x0e\x93\xd4\x1b\x80\x18\xe1\xcf\x01\x2d\xc5\xe0\x38\xd8\x26\x9c\xb7\x08\xed\xe1\x55\xbc\x7c\x7b\x3f\x82\x7d\xe1\xdd\x8d\x70\xae\xa2\x9c\xe4\xf7\x97\x2b\x95\x60\x54\x69\x01\xd4\x73\xdb\xac\xa3\x69\x17\x68\x0f\xa3\xe9\x78\x9e\x84\x3b\x6b\x92\x40\x79\x9a\x7a\x7d\x9f\xe4\x5e\x13\xa0\xc0\x35\x5f\x18\xd9\x3b\x0e\xb8\xa4\xc5\x41\x0f\x1a\x18\xa5\xf1\x3a\x44\xb9\xd2\x7f\x03\x4f\x11\x6d\xda\x02\xd7\xd1\xad\x91\x02\x89\x2f\x1b\xcd\xcb\xe6\xf5\x4d\xa3\x79\x6d\xbc\x92\x75\xfb\x50\x14\xc9\x9b\x5c\x4e\xa7\x87\xff\xba\x4d\x18\x0f\x04\xe8\x21\x61\x0a\xa3\x2b\xdb\xde\
x3f\x9a\xe3\x02\xda\x02\x64\xb7\xbc\x0e\x74\x61\x1f\x91\x1c\xb3\xb6\xdd\x91\xa0\xf3\x23\x7c\x28\x98\x82\x92\x70\xea\x15\xe2\x94\xb8\x8d\x8c\xf8\x24\x9c\xac\xe3\x28\x7d\x2f\x9c\xae\x84\x71\x85\xa3\x29\xd1\x6d\xc6\x01\x9b\x83\xcf\x8f\x7e\x32\xae\x4d\xe2\x34\x67\x15\x3b\xa3\x20\x94\x99\x6c\xf7\xc3\xc0\xb2\xe8\xa7\xe4\x24\x8c\x5f\xc3\xb8\x2c\xd0\x83\xd1\x17\x94\xff\xc2\xf7\xc3\x95\x29\x2e\xf7\x9b\xcc\x3b\x31\x87\xf7\x6b\x2a\x33\xea\x4f\xa5\xe6\x8c\x13\x7a\x57\xe1\xa1\xda\x8c\x76\x42\x72\xc6\x2a\x05\x67\xb4\x23\xd5\x56\xf5\x7e\xbe\x9c\xcd\xa2\xc5\xac\x18\xb1\x03\xad\xa0\x83\x51\xfb\xa0\x29\xee\x75\xb4\x4f\x68\x8f\x74\x40\x73\x18\x80\x49\x37\x2f\xe4\x33\xc1\x59\x06\x2a\xac\x36\x8d\xe6\x59\xad\x0f\x9e\xf5\x7d\xe1\xb5\x3f\x59\xbe\x0f\xe6\x2e\x24\x46\x8f\xe3\x24\x9a\x20\xab\x6a\x55\xef\xc7\xeb\xf4\xf9\xa1\x9c\x6f\xb7\xb8\x7e\x94\x44\x2f\xab\x79\x88\xac\x1f\x01\x00\x00\xff\xff\x20\x54\xc1\x6d\x5d\x06\x00\x00") +var _masterEtcEtcdEtcdConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x54\x5d\x6f\xea\x38\x10\x7d\xcf\xaf\x88\xcc\xcb\xee\xc3\x85\x94\xdb\x8f\xa5\x92\x1f\x8c\x33\x80\x85\x49\x52\xdb\x81\xa2\xaa\xb2\x28\x0d\x34\xda\x14\x50\x12\xba\xad\xba\xfd\xef\x2b\x27\x81\xd0\xc2\xae\x56\xba\xea\x1b\xcc\x1c\xcf\x9c\x39\x73\x26\xa0\xa8\xab\x3d\x32\x02\xfc\xfe\x6e\x37\x47\xb3\x2c\x8f\xd2\xe6\x60\x9d\xe5\xab\xd9\x73\x64\xff\x6d\x67\x4f\x51\x92\xdc\x6c\xd7\x79\x64\x7f\x7c\x58\x05\x9a\x33\xa9\xc0\xd3\x01\x80\xd0\xa1\xe0\xd2\xbc\xdc\xa4\xf1\x2a\xb7\xd1\x53\x9e\x6f\xb2\xeb\x56\x0b\xd9\xbf\xc5\xab\xc7\xe8\x75\x5f\x91\x05\x99\xed\xfc\xde\x94\x79\x1a\xaf\x96\x36\xba\x6e\xff\xfc\xc3\x41\xa7\xcb\xbb\x44\x11\xed\x32\x81\x5b\x2f\xb3\xb4\x95\xc4\x0f\xad\x28\x9f\x3f\xb6\xac\x46\x91\x9d\x10\x5e\x24\x11\xaa\x02\xd2\x23\x81\x1c\xf8\x4a\x53\x3f\xf4\x14\x3e\x73\x1c\xc7\x29\x0b\x0d\x80\x08\xd5\x05\xa2\x34\xf3\x14\x88\x31\xe1\xf8\x62\x97\x03\x0e\x54\x31\xdf\xd3\x8a\x8d\xc0\x0f\x15\x6e\xef\x53\xd5\x78\x94\x33\xf0\xd4\x2f\x0c\x78\xd5\x39\x1e\xb0\xa4\x3c\x22\xb7\x7b\xda\x12\x5f\x1c\x44\x27\x84\xd7
\x01\xea\x0b\x89\x2d\xcb\x6a\xdc\xcd\x93\xad\x69\x72\x5f\x12\x64\x1e\x53\x8c\x70\x4d\xdc\x31\x08\xc5\x24\x7c\xdb\x2a\x76\x9d\x28\x0f\xa5\x02\x51\x57\x3f\x32\x0a\xc2\xdf\xd0\x4f\x4b\x45\x14\xe0\x55\xf4\xd7\xe9\xb4\xf2\x87\xe0\x61\xe3\x8e\x1f\x95\x42\x3f\xce\x2a\xed\x5c\x26\xa9\x3f\x06\x31\xc5\x5f\x03\x5a\x8a\xf1\x71\xb0\x47\x38\xef\x12\x3a\xc4\x9b\x74\xfd\xfa\x76\x94\x0e\x84\x7f\x3b\xc5\x25\x8b\x5a\xf6\x6f\xb4\x88\x54\x82\x51\xa5\x05\x50\xdf\xeb\xb1\xbe\xa6\x03\xa0\x43\x8c\x16\xb3\x24\x8b\x76\xce\x27\xa1\xf2\x35\xf5\x47\x01\x29\xad\x2c\x40\x81\x67\x7e\x61\xe4\xec\x30\xe0\x91\x2e\x07\x3d\x6e\x63\x94\xa7\xdb\x08\x95\x23\xdc\x84\xbe\x22\xda\xcc\x0b\x9e\xab\xbb\x53\x05\x12\x9f\xb7\x3b\xe7\x9d\xcb\xab\x76\xe7\xd2\x38\xae\x90\xe1\xbe\x2a\x52\x4e\xbf\x5e\x2c\x0e\xff\xeb\x1e\x61\x3c\x14\xa0\x27\x84\x29\x8c\x2e\x1c\x67\xdf\xb4\xcc\x0b\xe8\x09\x90\x83\xfa\xf8\xd0\x4f\xe7\x08\xe4\x9a\x7d\xee\x6e\x10\x9d\x1d\xe5\x27\x82\x29\xa8\x01\xa7\xba\x10\xb7\xce\x3b\xc8\x90\xcf\xa2\xf9\x36\x8d\xf3\xb7\xea\x5e\x94\x30\x76\x71\x35\x25\xba\xc7\x38\x60\xf3\x3d\x29\xbf\x29\xf3\x59\x73\x9e\xe6\x25\xaa\x5a\x26\x05\xa1\x8c\xb2\x83\x4f\x82\x15\xd1\x2f\x8f\xb3\x28\x7d\x89\xd2\xba\xc0\x10\xa6\xff\x02\xf9\x33\x7a\x3b\x5c\x99\xe2\x72\xbf\xc9\x72\x12\x73\xbe\xff\x8f\x65\x01\xfd\x4f\xaa\x25\xe2\x04\xdf\x4d\x74\xc8\xb6\x80\x9d\xa0\x5c\xa0\x6a\xc2\x05\xec\x88\xb5\xd5\xb8\x4b\xd6\xcb\x65\xbc\x5a\x56\x12\xbb\xd0\x0d\xfb\x18\xf5\x0e\x86\xe2\x7e\x5f\x07\x84\x0e\x49\x1f\x34\x87\x31\x98\xe7\xa6\x43\xa9\x09\x2e\x5e\xa0\xca\x6a\x8b\x38\x29\x6a\x7d\xf2\x6c\x10\x08\xbf\xf7\xc5\xf2\x23\x30\x77\x21\x31\x7a\x98\x65\xf1\x1c\x59\x0d\xab\x71\x37\xdb\xe6\x4f\xf7\xb5\xbe\x83\xea\xb3\x80\xb2\xf8\x79\x93\x44\xc8\xfa\x27\x00\x00\xff\xff\x6b\x4e\x0b\x40\xe2\x06\x00\x00") func masterEtcEtcdEtcdConfBytes() ([]byte, error) { return bindataRead( @@ -118,7 +118,7 @@ func masterEtcOriginMasterHtpasswd() (*asset, error) { return a, nil } -var _masterEtcOriginMasterMasterConfigYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x7b\x6f\x1b\xb9\x11\xff\x5f\x9f\x82\x08\x0e\x48\x52\x74\x77\x25\x3b\xcf\x05\x8a\x42\xb5\x9d\x8b\x70\x76\xa2\xca\x4e\x71\x45\x5d\x1c\x28\x72\xb4\x62\xc4\x25\x37\x7c\x28\x56\xdc\x7c\xf7\x82\x8f\x7d\x4a\x4a\xd2\x5c\xd0\x73\x02\xc4\x4b\xfe\x66\x38\xe4\xfc\x38\x0f\x06\xd3\x92\x69\xcd\xa4\x38\x93\x62\xc5\x8a\x7c\x84\x50\xc5\x6d\xc1\x3a\xdf\x08\xfd\xcd\x32\x4e\xcf\x61\x85\x2d\x37\x3a\x0c\x21\x44\x3c\xc0\x2a\x6c\x98\x14\xf5\x20\x42\xb8\x62\xff\x00\xe5\x34\xe6\x68\x3b\x69\x86\x41\x6c\x73\xf4\xaf\x7f\x37\xdf\x1b\x26\x68\xde\x57\x1c\x56\x6c\x10\x0a\xb4\xb4\x8a\x80\x6e\x75\x23\xc4\x59\xc9\x8c\xce\xd1\xfd\xe7\xce\xa0\x82\x0f\x16\x74\x67\xd8\xab\x7d\xbb\x05\xa5\x18\x85\xef\x34\xb8\x63\x60\xa3\xa9\x63\xe1\x5c\xd2\xb9\x02\x0d\xe6\xfb\xb4\x53\xa6\xf1\x92\x43\x8e\x56\x98\x6b\x18\x2c\x1a\x0f\x64\xda\x77\x8d\x07\xc9\x0a\x84\x5e\xb3\x95\x49\x99\xcc\x66\x25\x2e\x60\x2e\x39\x23\xbb\xef\x74\xca\x1d\x10\xeb\x90\x0b\xcb\xbb\xe7\x9c\xa0\x12\x1b\xb2\xf6\xfa\xa7\x42\x48\xe3\xd5\xf5\x1c\x91\xa0\x0d\xec\x72\xc4\x1c\x44\xa7\x3d\xb3\x28\x88\x5d\xd2\xa8\xee\xc8\x20\xb4\xc5\xdc\x42\x8e\x1e\x1a\x65\xe1\x61\x67\x46\xe0\x12\xf2\xd6\x9c\x84\x82\x60\x40\x3b\x00\x29\x16\x87\xe8\x90\x34\x2c\xc9\x51\x25\xa9\x3e\x32\xb5\x74\x5e\xd4\x3d\xc6\xbc\x07\x62\x72\xe4\xec\xe8\x0c\xeb\x0d\xab\xde\xfa\x95\xb8\xb7\xe3\x15\x66\xdc\x2a\x18\xe0\x82\x93\x3a\x87\x1f\xfd\x83\x8b\x42\x41\x81\x8d\x54\x9d\xbb\xa4\xe4\xdd\xee\x8c\x33\x10\x66\x26\x56\x32\xd8\x4e\x40\x99\x57\xcc\x79\xbf\x15\x49\x56\x4a\x0a\x93\x78\x7c\x4a\x94\xf1\xc0\x0d\xec\xbe\x88\xdb\xc0\x6e\x84\x2b\x76\x09\x5b\xe0\x3a\x1f\x25\xce\xb7\x03\x57\x63\x6b\xd6\xad\x39\xf1\xa6\xbc\x06\x4c\x41\x45\x63\xbc\x71\x67\xd3\x1c\x75\x34\x27\x04\x37\x46\x44\x80\x2c\x4b\x29\xde\xe0\xb2\x76\x40\x72\xc4\xa8\x51\x20\x96\x51\x38\xac\x32\x57\xb0\x62\x77\xad\xd4\xaf\xc9\x02\x4a\x69\x20\xb9\x70\x98\xc4\x8f\x16\x4a\xda\x2a\xc0\xf7\x71\x3f\xbb\x49\x3f\x68\x35\x28\xc7\x94\x63\xc8\x77\x1a\xd4\x88\x48\x61\x94\xe4\x1c\x3a\x5e\x00\x0e\xa4
\xbd\x10\x5c\x92\xcd\x1b\x4f\xb8\x86\xb6\x49\x89\xb5\x01\x95\xb4\xc2\x8e\x2d\x1a\xd4\x96\x11\xb8\x76\xff\x88\xe2\x0c\x54\xbc\xec\x9a\x15\xa2\x3e\xbe\xae\x37\x23\x3e\x09\xf3\xcd\x01\x76\xfc\x38\x40\x38\xff\x75\x96\xcc\xd1\xc3\x3f\x3d\x1c\x11\xa9\xf4\x94\x73\xf9\x11\xe8\x5b\xc5\x0a\x26\xbc\x67\x1f\xfd\x95\x3d\xce\xb2\xc9\xc9\xf3\xdb\x74\xec\xff\x4e\x1e\xe5\xff\xb9\xfd\xf4\xb8\x99\xe2\x92\x60\xbe\x96\xda\x0c\xc6\xef\xef\xd1\xdf\xad\x34\x70\x05\x06\xa3\x47\x4c\x50\xb8\x43\xe9\x95\xdf\x6e\x3a\x9b\x6b\x34\x7e\x9c\x5e\x1b\xc5\x44\x81\x3e\x7f\x1e\x88\x6e\xec\x12\x94\x00\x03\xfa\x36\xa5\x21\x26\x7d\x1d\x71\x9b\xea\x2d\xb9\x4d\x09\xb7\x6e\x89\xdb\xd4\xdb\x75\x54\xec\x4b\xc6\xa6\x17\x77\xc6\x39\x9c\x07\x6b\x5f\x4b\x6d\x9c\xf7\xf7\xed\x6c\xdc\x78\xcc\xcc\xbe\xda\xb8\xf9\xff\x45\x9f\xdf\xd4\x37\xee\x7d\x00\x9b\x3c\x3f\xb9\x4d\x4f\x0f\xfb\xec\xc8\x42\x5f\x39\xbd\x46\x2a\x8e\x53\xa1\x5b\xaa\x2f\x99\xa0\x53\x4a\x15\x68\x9d\xa3\x71\xea\xff\xe4\x2f\xc6\x4f\x4f\xe3\xdc\x1b\x30\x1f\xa5\xda\xe4\xc8\x90\xea\xc9\x08\x0c\xa1\xfd\xe8\x44\x70\x8e\xc2\x65\x48\xdd\x64\x1b\x08\x5a\x9a\xf7\xa6\xbd\x70\x84\x34\x34\x3f\x80\x70\x54\x47\xc8\x2a\xee\xaf\x6d\x82\xd6\xc6\x54\x3a\xf7\xae\x39\xe0\x90\xfc\xe4\xf4\xf9\x4b\x6f\xdd\xb5\x91\x0a\x17\xd0\x6e\xb0\x3d\xf6\x38\x15\x02\x4c\xde\x99\x48\x99\x3c\x04\xec\x67\x40\x77\x8a\xd7\xee\x14\x07\x6a\xba\xa9\xec\x00\xac\xab\xc4\x27\xbf\xd6\xb2\x95\x54\x25\x36\x39\x9a\x5d\x4d\x7f\xbe\xf8\x6d\xbe\xb8\x78\x35\xfb\x35\x0b\x1f\x37\xff\x9c\x5f\x24\x3f\xdd\x13\x59\x56\x52\x80\x30\x9f\xf3\x9f\xee\xb7\x41\x93\xab\x58\x38\x36\xa0\x4d\x5d\x0c\xb0\x61\x66\x71\xca\x99\x08\x77\x61\x01\x05\xd3\x46\xed\xea\xc3\xca\x11\x95\x64\x03\x2a\x51\x71\xa2\x66\x92\x23\x52\xfe\x74\x3c\x1e\x8f\x42\xbe\x0a\x87\x1c\x53\x95\x3b\x1b\x0e\x66\xdf\xf5\x04\x27\x4b\x2b\x28\x87\x63\x5e\x8f\x92\x5f\x76\xfc\x00\x14\x7c\x5f\x49\x65\x72\x34\x19\x9f\x3c\x1d\x8f\x5a\xdf\x74\xcd\x72\x46\xe0\x8a\xb9\x78\x0b\x6a\xaa\x0a\x5b\x82\xa8\xeb\x4d\x65\x85\x61\x25\x24\xa4\x53\x96\x26\x0e\xad\x33\x0d\xc6\x30\x51\xe8\x74
\xf3\xc2\xb9\x3e\xdb\x4e\x30\xaf\xd6\x78\xf2\x97\x26\x6b\xeb\xe0\xbb\x64\x89\xc9\x06\x04\xad\xa5\x1d\xbf\x4e\x7b\x80\x12\x28\xc3\x89\xd9\x55\xd0\xae\x50\x71\x46\x7c\xfd\x93\x6d\x05\x4d\x3b\x2c\xab\x94\x34\x72\x69\x57\x31\x4b\x4a\x4b\x5d\x06\xdc\xb2\x26\xb5\x26\xe8\x01\xfe\x64\x15\x3c\xe8\x20\xfa\xf6\x3f\xc8\xc0\x90\x4c\xfa\x30\x9f\x79\x40\xad\x21\xf3\x92\xa9\x83\x3b\xf1\x36\x4b\x0c\xce\x25\x46\x0a\x9f\x51\x98\x28\x12\xe7\xb0\x64\xe5\x9c\x71\x68\x89\xe0\x9f\x2c\x5c\xea\x07\x07\x15\x6c\x60\xf7\x2d\xf2\x1b\xd8\x3d\xf8\x7f\x6c\xbc\x8c\xfc\xb0\xc2\x91\xa7\x19\x98\xcd\x73\x74\x7f\xff\xb5\x7c\xe6\x59\x47\x2f\xb6\xcc\x67\xff\x1b\x56\x82\xb4\x26\x47\xc2\x72\xfe\xf5\xda\x2c\x72\x39\xd6\x43\x5d\xba\xef\x13\xbe\x07\x0a\x74\xd7\x64\x0d\xd4\xf6\x1c\x56\x2f\xdc\x4c\x05\xda\x07\x4d\x07\x8e\xb9\xc1\xa5\xef\xb5\x2f\xa4\x63\xf9\xa0\xdf\x48\x0a\x73\xa9\xcc\x02\x8b\xc2\x95\xd3\x0f\x3b\x73\xd7\x76\x29\xc0\x9d\xd5\xf3\x93\xf4\xd4\x87\xff\x6c\xf2\xcc\xcd\xbb\x22\x9e\x38\xc9\x50\xc4\xb9\x66\x2c\x1e\xae\x37\xdb\xf3\x09\x62\xc6\xfd\xa5\x61\xf9\x59\xac\xfe\x84\x08\x25\xd4\xa0\xab\xc2\x84\x40\xe5\xa6\x0d\x08\x73\xb3\xab\x9c\xe2\x6f\xb8\x32\x7f\xee\x62\xe2\xe6\x10\x5a\x5a\xe5\xa2\xe0\x93\xf1\x78\x14\x7b\x99\x5a\xeb\x37\x29\xf5\x42\x1f\x2a\x9d\xa3\x13\xaf\x61\x7f\x33\xee\xb7\x18\x6a\xc2\xa1\x35\xc1\xfe\x52\xca\xca\x45\x87\x3f\x60\xbb\xcf\x7e\xf7\x76\x4f\xbd\x86\xbd\xbd\x74\x77\x3b\xac\x74\xbd\xc2\x70\x25\x23\x0b\xe6\x76\xc9\x19\x79\xb7\xb8\xcc\x7b\xb9\xf9\x68\x0d\x96\x77\x32\xb7\xe3\xa2\xbb\x6e\x22\x14\x17\x6d\x34\x8f\xc1\x25\x16\x1d\x67\xb3\xf3\x85\xcb\x00\xe9\xe4\xe4\x45\x20\xe6\x93\x3d\x4c\x2c\x0f\x08\xa3\x6a\x1f\x8a\x90\x2b\x70\x03\xc3\x2f\x41\x14\x66\x9d\xa3\x97\x1d\x4f\xcf\xe6\x9d\x95\xa2\xa6\x58\x03\x65\xee\x88\x0e\x4b\x47\xab\xe7\xfe\xd9\x23\x34\x06\x0a\xe8\x1a\x9b\xb6\xd2\x4a\xe4\x56\x27\xda\x4b\xb6\x57\xad\xbf\xab\xde\x75\x93\xfd\xb6\x0b\x6b\x0d\xe6\x07\x1c\x70\x46\xa4\xd0\x92\x43\x36\x72\x3d\x13\xf6\x44\x6d\x82\x6a\x09\x66\x2d\x69\x8e\xb0\x35\xae\x70\x61\x14\x84\x61\x66\x37\x8f\x71\x55
\xe7\xa3\xfb\xfb\x04\xb1\x15\x4a\x2f\x04\x5e\x72\x98\x4e\xcf\xa7\xd6\xac\x1d\x2a\x10\xcd\xc7\xcb\x24\xf6\xe2\x53\x17\x85\xd1\xf4\x3c\x50\x73\x8d\x39\x07\x1f\x6b\xda\xf7\x0a\x2e\x0b\x26\x3a\xad\x71\x89\xab\x8a\x89\xe2\x2a\x9a\x41\x38\x66\xa5\x9f\xe8\xe7\x86\x23\x8f\x11\xa1\x48\x79\x5b\x81\x98\x9d\xcf\x06\xa6\xd7\x8d\x56\x08\xd5\xe7\x3e\xf2\xa7\xde\xc0\xb0\xff\x74\x3a\x3d\x8f\x71\xfc\x3c\x44\xfd\x16\x7e\x0d\x44\xb9\x70\x78\x54\x24\x00\xba\x62\x98\x95\x9d\x47\x06\x46\xbb\xef\x22\xda\x2e\x9b\xaf\x4a\xc1\x0a\x94\x02\xfa\x2e\xf6\xa5\x5d\xa0\x15\xec\x83\x85\xdf\xdc\x70\x33\x3a\xc4\xf4\x26\xa1\xc4\x8c\x77\x67\xfd\x40\xfc\xae\xeb\xe6\x78\x80\xd6\xac\xa5\x62\x9f\xa0\x65\x92\x77\x46\x5a\x32\xa2\xa4\x96\x2b\x23\x05\x67\xc2\x25\xd1\x32\x1b\x6e\xfc\x06\x04\x8e\x07\x95\x79\x9a\x9e\x64\x8d\xbe\x66\x05\x23\x37\x20\x7e\x90\x76\xaf\xcb\x73\x0f\x04\xed\x71\xec\xd2\x35\x37\xa8\xc2\x5a\x7f\x94\x8a\x0e\x99\xd6\x10\xeb\xc7\x12\x6d\x75\x2c\xdb\xae\x8d\xb7\x84\xf6\x08\xf9\xfa\x66\xee\x07\xe7\xd1\xc8\x03\xd4\x8c\x49\x74\xba\x5f\x3c\xff\xb8\xc0\x5a\xeb\xfa\xbd\x5a\x34\x0c\xde\x7a\x9b\xa1\x2b\x7c\x37\x2d\xe0\xda\xe5\x04\xea\x52\x4a\x9d\x95\xe2\x74\x08\x8b\x5a\x8b\xee\x60\xb8\x3a\xfa\x78\xfd\x12\x60\x89\x0e\xb8\x74\x87\x4b\x47\x68\x4f\x88\xae\x09\x2e\xab\x6a\x7d\xe3\x86\x07\x66\xbc\x78\x56\x17\x03\x0d\x47\x0f\xc1\x9e\x8e\xc7\xa3\x0a\x5b\xed\x58\xd8\x3e\xa6\x84\x50\x55\x0d\x1a\xa9\xa5\x94\x46\x1b\x85\xab\xd0\x61\x1d\x35\x3e\xc8\xd5\x95\x57\x93\x09\x66\x62\xa5\xb0\x36\xca\x12\x63\x55\x28\xa5\x2a\x4c\x7a\x6f\x49\xcc\x41\xba\x32\xd7\x6b\xac\x80\x36\x0f\x98\x87\x84\x46\x95\x92\xef\x81\x74\x02\x7a\x6c\xe3\x5c\xc1\x76\xed\xdf\xaf\xa4\xca\x91\x90\x14\x12\x25\x39\xa4\xbd\x6e\x37\x73\x8d\xa5\x35\x50\x77\x3c\x51\xd9\x22\xbc\xf6\x5d\x81\xd6\xb8\xa9\x13\xfb\x73\x37\x50\x56\xae\xfb\x6c\x8a\x48\x62\x15\x33\xbb\x29\xe7\x92\x60\xb7\x64\xb8\x71\x44\x37\x23\xb1\xe6\xd4\xe3\x3c\x3b\xa9\x27\x2f\xf1\x12\xb8\x9e\x83\x9a\x07\xe5\x39\x7a\x1a\x1e\xec\x18\x1d\xca\x4d\xc6\xf5\x4f\x32\x79\x59\xff\x64\x7e\x74\xa4\xa4\x75\x8d
\x5c\x7b\x06\xda\x2e\xa9\x2c\xb1\xbb\xfd\x37\x17\x57\xf3\xc5\xdb\x77\x37\x17\x8b\xd9\x3c\x15\xac\x72\xdd\x79\xcc\xc3\x53\x42\x5c\x7b\xd0\x8a\xf9\xff\x24\x08\xe4\x5c\xb8\x10\x0d\x82\x80\x6e\x53\x57\x89\x05\x2e\x80\x36\x6f\x99\x49\x7d\xd6\xfe\x77\xff\x56\xec\x2f\xb6\x1b\xaf\xb8\xdc\x7d\xe5\x96\x57\x8a\x6d\xb1\x81\x5f\x06\x6f\x7c\x38\x58\xe5\xea\x35\x3f\x5f\x77\xc1\x3e\x1a\x44\x70\x5c\x7e\x4f\xc2\x63\xbc\x80\x0e\x6f\x8f\x75\x7f\x72\xf0\x75\xe7\xe0\x4d\xdf\x7f\xeb\x39\xd0\xdb\x68\xdf\x69\xd7\xad\x7e\xf3\x10\xdc\xbc\xf9\x0c\xdb\x9c\x88\x0f\x3b\x29\xf1\x5d\x24\x91\x9e\x89\x57\x9c\x15\x6b\x13\x6e\x62\xf3\xcc\x1c\x1b\xae\x7e\x50\xd9\x4a\x6e\xcb\xce\xab\x09\xdd\x09\x5c\x32\xe2\x03\xaa\x8b\x16\x4c\x14\xa1\x3e\xa1\x31\xe4\xff\x37\x00\x00\xff\xff\x51\xa6\x87\xe7\xa2\x1a\x00\x00") +var _masterEtcOriginMasterMasterConfigYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x79\x6f\x1b\xbb\x11\xff\x5f\x9f\x82\x10\x1e\x90\xa4\xe8\xae\x64\x3b\xe7\x02\x45\xa1\xda\xce\x8b\xf1\xec\x58\x95\x9d\xa2\x40\x5d\x04\x14\x39\x5a\x31\xe2\x92\x1b\x1e\x8a\x15\xbf\x7c\xf7\x82\xc7\xee\x72\x75\x24\x41\x9a\xf6\x39\x01\xe2\x25\x67\x86\xc3\x99\x1f\xe7\x0a\xa6\x15\xd3\x9a\x49\x71\x2a\xc5\x82\x95\xc5\x00\xa1\x9a\xdb\x92\x25\xdf\x08\xfd\xcd\x32\x4e\xcf\x60\x81\x2d\x37\x3a\x2c\x21\x44\x3c\x81\x55\xd8\x30\x29\x9a\x45\x84\x70\xcd\xfe\x01\xca\x49\x2c\xd0\xfa\xa8\x5d\x06\xb1\x2e\xd0\xbf\xfe\xdd\x7e\xaf\x98\xa0\x45\x5f\x70\x38\xb1\xa5\x50\xa0\xa5\x55\x04\x74\x27\x1b\x21\xce\x2a\x66\x74\x81\x1e\xbe\x24\x8b\x0a\x3e\x5a\xd0\xc9\xb2\x17\x7b\xbd\x06\xa5\x18\x85\x1f\x54\x38\x51\xb0\x95\x94\x68\x38\x95\x74\xaa\x40\x83\xf9\x31\xe9\x94\x69\x3c\xe7\x50\xa0\x05\xe6\x1a\xb6\x0e\x8d\x06\x99\xf4\x5d\xe3\x89\x64\x0d\x42\x2f\xd9\xc2\xe4\x4c\x8e\x2e\x2a\x5c\xc2\x54\x72\x46\x36\x3f\xe8\x94\x7b\x20\xd6\x51\xce\x2c\x4f\xed\x9c\xa1\x0a\x1b\xb2\xf4\xf2\x27\x42\x48\xe3\xc5\xf5\x1c\x91\xa1\x15\x6c\x0a\xc4\x1c\x89\xce\x7b\x6a\x51\x10\x9b\xac\x15\x9d\xf0\x20\xb4\xc6\xdc\x42\x81\x1e\x19\x65\xe1\x51\xb2\x23\x70\x05\x45\xa7\x4e\x4
6\x41\x30\xa0\x09\x81\x14\xb3\x7d\x70\xc8\x5a\x94\x14\xa8\x96\x54\x1f\xd8\x9a\x3b\x2f\xea\x1e\x62\x3e\x00\x31\x05\x72\x7a\x24\xcb\x7a\xc5\xea\x6b\x7f\x12\xf7\x7a\xbc\xc6\x8c\x5b\x05\x5b\x74\xc1\x49\x89\xf1\xa3\x7f\x70\x59\x2a\x28\xb1\x91\x2a\x79\x4b\x4a\xde\x6f\x4e\x39\x03\x61\x2e\xc4\x42\x06\xdd\x09\x28\xf3\x9a\x39\xef\x77\x2c\xd9\x42\x49\x61\x32\x4f\x9f\x13\x65\x3c\xe1\x0a\x36\x5f\xa5\x5b\xc1\x66\x80\x6b\x76\x09\x6b\xe0\xba\x18\x64\xce\xb7\x5b\xae\xc6\xd6\x2c\x3b\x75\xe2\x4b\x79\x03\x98\x82\x8a\xca\x78\xe5\x4e\x27\x05\x4a\x24\x67\x04\xb7\x4a\x44\x02\x59\x55\x52\xbc\xc5\x55\xe3\x80\xec\x80\x52\x83\x00\x2c\xa3\x70\x38\x65\xaa\x60\xc1\xee\x3b\xae\x7f\x66\x33\xa8\xa4\x81\xec\xdc\xd1\x64\x7e\xb5\x54\xd2\xd6\x81\x7c\x97\xee\x57\xb7\xe9\x17\xad\x06\xe5\x90\x72\x88\xf2\x9d\x06\x35\x20\x52\x18\x25\x39\x87\xc4\x0b\xc0\x81\x74\x0f\x82\x4b\xb2\x7a\xeb\x01\xd7\xc2\x36\xab\xb0\x36\xa0\xb2\x8e\xd9\xa1\x45\x83\x5a\x33\x02\x37\xee\x1f\x51\x9e\x82\x8a\x8f\x5d\xb3\x52\x34\xe6\x4b\xbd\x19\xe9\xb3\xb0\xdf\x1a\x30\xf1\xe3\x16\x85\xf3\x5f\x72\x64\x81\x1e\xfd\xe9\xd1\x80\x48\xa5\x27\x9c\xcb\x4f\x40\xaf\x15\x2b\x99\xf0\x9e\x7d\xfc\x57\xf6\x64\x34\x3a\x3a\x7e\x71\x97\x8f\xfd\xdf\xa3\xc7\xc5\xef\x77\x9f\x9f\xb4\x5b\x5c\x12\xcc\x97\x52\x9b\x76\xfd\xe1\x01\xd5\x8a\x09\x83\x86\x81\x62\x88\x1e\xff\xdd\x4a\x03\x57\x60\x30\x7a\xcc\x04\x85\x7b\x94\x5f\xf9\x8b\xe7\x17\x53\x8d\xc6\x4f\xf2\x1b\xa3\x98\x28\x9f\xa0\xa1\x13\x72\xf7\xf9\xc9\x10\xfd\x8e\x3e\x3a\x1e\xf4\xe5\x4b\x7b\xd2\xca\xce\x41\x09\x30\xa0\xef\x72\x1a\x42\xd5\x96\x2e\x7b\x28\xee\x72\xbd\x26\x77\x39\xe1\xd6\x9d\x77\x97\x7b\x75\x0f\xb2\x7d\xdf\x1d\xf2\xf3\x7b\xe3\x10\xc1\xc3\x25\xde\x48\x6d\x1c\x3c\xbe\xa1\x7e\xeb\xf4\x5d\xed\xbf\x7e\x5a\x34\xd5\x0f\x1f\xe3\x4d\xf0\x9d\x96\xda\x22\x3b\x7a\x71\x7c\x97\x9f\xec\x77\xfc\x81\x83\xbe\x61\xeb\x96\x2b\xae\x53\xa1\xbb\xf7\x32\x67\x82\x4e\x28\x55\xa0\x75\x81\xc6\xb9\xff\x53\xbc\x1c\x3f\x3b\x89\x7b\x6f\xc1\x7c\x92\x6a\x55\x20\x43\xea\xa7\x03\x30\x84\xf6\x43\x1c\xc1\x05\x0a\x2f\x2a\x77\x9b\x5d\x34\xe
9\xde\x4a\x6f\xdb\x33\x47\x92\xf6\xad\xec\xa1\x70\xef\x05\x21\xab\xb8\x7f\xfb\xa9\xbf\x96\xc6\xd4\xba\x70\x1e\xdb\x76\x13\x1a\x16\xc7\x27\x2f\x5e\xf5\x7c\xe4\x64\xde\x18\xa9\x70\x09\xdd\xa5\x3b\x57\xc4\xad\x10\xb9\x8a\x64\x23\x67\x72\x1f\x61\x3f\xb5\x3a\xcb\xde\x38\xcb\x6e\x89\x49\x73\xe4\x1e\xb2\x54\x88\xcf\xaa\x9d\x66\x0b\xa9\x2a\x6c\x0a\x74\x35\xb9\xb9\x3d\x9f\xbd\xbf\x9e\x9d\xff\xfa\xfe\xdd\xec\x32\xfb\xe5\x81\xc8\xaa\x96\x02\x84\xf9\x52\xfc\xf2\xb0\x0e\x12\x5c\x09\xc4\xb1\x01\x6d\x9a\xea\x82\x6d\xa7\x2a\x27\x94\x89\xf0\x76\x66\x50\x32\x6d\xd4\xa6\xb1\x57\x81\xa8\x24\x2b\x50\x99\x8a\x1b\x0d\xaa\x1c\xa8\x8a\x67\xe3\xf1\x78\x10\x12\x60\xb0\x73\xcc\x7d\xce\x26\x1c\xcc\x2e\x0c\x08\xce\xe6\x56\x50\x0e\x87\x10\x10\x39\xbf\x0e\x82\x2d\xa2\x80\x83\x5a\x2a\x53\xa0\xa3\xf1\xf1\xb3\xf1\xa0\xf3\x49\xaa\x96\x53\x02\xd7\xcc\x05\x70\x50\x13\x55\xda\x0a\x44\x53\xc0\x2a\x2b\x0c\xab\x20\x23\x49\x9d\x9b\x39\x6a\x3d\xd2\x60\x0c\x13\xa5\xce\x57\x2f\x9d\xcb\x47\xeb\x23\xcc\xeb\x25\x3e\xfa\x4b\x5b\x06\xe8\xe0\xb3\x6c\x8e\xc9\x0a\x04\x6d\xb8\x1d\xae\x4e\x7a\x04\x15\x50\x86\x33\xb3\xa9\xa1\x3b\xa1\xe6\x8c\xf8\x82\x6a\xb4\x16\x34\x4f\xd0\x55\x2b\x69\xe4\xdc\x2e\x62\xda\x95\x96\xba\x94\xba\x66\x6d\xae\xce\xd0\x10\x7f\xb6\x0a\x86\x09\x45\x5f\xff\xe1\x08\x0c\x19\x49\x9f\x37\x46\x9e\xa0\x91\x30\xf2\x9c\xb9\x23\x77\xec\x5d\xda\xd9\xb2\x4b\x8c\x1a\x3e\x45\x31\x51\x66\xce\x61\xd9\xc2\x39\x63\xdf\x11\xc1\x3f\xa3\xf0\xc0\x87\x7b\x05\xac\x60\xf3\x3d\xfc\x2b\xd8\x0c\xff\x1f\x17\xaf\x22\x3e\xac\x70\xe0\x69\x17\x2e\xa6\x85\x0b\x27\x5f\x4b\x8b\x69\x04\x71\xe8\xa3\xe7\x6b\xe6\xcb\x8a\x5b\x56\x81\xb4\xa6\x40\xc2\x72\xfe\xed\xa2\x2f\x62\x3a\x16\x5a\x29\xec\x77\x81\xdf\x23\x0a\xb0\xd7\x64\x09\xd4\xf6\x1c\xd7\x1c\xdc\x6e\x05\xf8\x07\x49\x7b\xcc\xdd\xd2\xe5\x1f\xb4\xaf\xd0\x63\x5d\xa2\xdf\x4a\x0a\x53\xa9\xcc\x0c\x8b\xd2\xd5\xe9\x8f\x92\xbd\x1b\x3b\x17\xe0\x6c\xf6\xe2\x38\x3f\xf1\x29\x61\x74\xf4\xdc\xed\xbb\xee\x80\x38\xce\x50\x1d\xba\x2e\x2f\x1a\xd9\xab\xed\x71\x05\x31\x53\xff\xd6\xa2\xfd\x34\x96\x95\x42\x8
4\xda\x6c\xab\x5d\xc3\x84\x40\xed\xb6\x0d\x08\x73\xbb\xa9\x9d\xe0\xef\x78\x3a\x7f\x4e\x69\xe2\xe5\x10\x9a\x5b\xe5\xa2\xe1\xd3\xf1\x78\x10\x9b\xa4\x46\xea\x77\x09\xf5\x4c\x1f\x6b\x5d\xa0\x63\x2f\x61\xf7\x32\xee\xb7\x18\x72\x82\xd1\xda\x60\x7f\x29\x65\xed\xa2\xc4\x1f\x70\xdd\xe7\xff\xf5\x75\x4f\xbc\x84\x9d\xbb\xa4\xb7\xdd\x2e\xa1\xbd\xc0\xf0\x34\x23\x0a\xa6\x76\xce\x19\x79\x37\xbb\x2c\xf6\xe7\xeb\xfd\x45\x1c\x1a\x16\x5d\x2e\x77\x90\x4c\x5f\x9f\x08\xf5\x47\x17\xe4\x63\xcc\x89\x75\xc9\xe9\xc5\xd9\xcc\x25\x86\xfc\xe8\xf8\x65\xc0\xe9\xd3\x1d\x9a\x58\x41\x10\x46\xd5\x2e\x29\x42\xae\x90\x0e\x80\xbf\x04\x51\x9a\x65\x81\x5e\x25\x8e\xbf\x98\x26\x27\x45\x49\xb1\x4c\x1a\x39\x8b\xed\xe7\x8e\x5a\x4f\xfd\x78\x25\x34\x20\x0a\xe8\x12\x9b\xae\x18\xcb\xe4\x5a\x67\xda\x73\x76\x2f\xaf\x7f\xab\xde\xeb\x93\xfd\xf6\x0e\x6b\x0d\xe6\x67\xd9\x7b\x38\x22\x52\x68\xc9\x61\x34\xec\x47\xbe\x52\x61\x8f\xe4\x36\xfa\x56\x60\x96\x92\x16\x08\x5b\xe3\x2a\x1b\x46\x41\x18\x66\x36\xd3\x18\x80\x75\x31\x78\x78\xc8\x10\x5b\xa0\xfc\x5c\xe0\x39\x87\xc9\xe4\x6c\x62\xcd\xd2\x51\x05\x24\x7a\xb1\x59\x9c\x02\x4c\x5c\xb8\x46\x93\xb3\x80\xdd\x25\xe6\x1c\x7c\x30\xea\x26\x25\x5c\x96\x4c\x24\x4d\x79\x85\xeb\x9a\x89\xf2\x2a\xaa\x41\x38\x66\x95\xdf\xe8\x27\x91\x03\x63\x90\x50\xcd\x5c\xd7\x20\x2e\xce\x2e\xb6\x54\x6f\x5a\xbc\x10\xcb\xcf\xbc\x45\x73\xaf\x60\xb8\x7f\x3e\x99\x9c\xc5\x40\x7f\xd6\x37\x52\xc7\x76\x03\x44\xb9\xb8\x79\x90\x35\x10\xec\x63\xc7\xac\x4a\xc6\x1d\x8c\xa6\x13\x1a\x6d\xe7\xed\x57\xad\x60\x01\x4a\x01\x7d\x17\x3b\xe4\x94\xd0\x0a\xf6\xd1\xc2\x7b\xb7\xdc\xae\x6e\xd3\xf4\x36\xa1\xc2\x8c\xa7\xbb\x7e\x21\x7e\x37\xc5\x77\x34\xa8\x35\x4b\xa9\xd8\x67\xd8\x87\x35\xef\xa6\xbc\x62\x44\x49\x2d\x17\x46\x0a\xce\x84\xcb\xc3\x95\x43\x61\x6a\x89\x5b\x10\xd8\x5b\x70\x38\xf2\x80\x3e\x1e\xb5\x72\x87\xbb\x66\x41\xc8\xc8\x15\x88\x9f\x7c\xa2\x97\xd9\x3b\xcd\x61\x16\x04\xed\x61\xf3\xd2\x75\x52\xa8\xc6\x5a\x7f\x92\x8a\x6e\x23\xb4\x05\xe4\xcf\x05\xe8\xe2\x50\x1a\x5f\x1a\xaf\x09\xed\x01\xf9\xcd\xed\xd4\x2f\x4e\xa3\x92\x7b\x20\x1d\xb3\xf
3\x64\xb7\x3a\xff\x9f\x45\xec\x46\xf4\x4f\x16\xaa\x61\x6b\x76\xdd\x2e\x5d\xe1\xfb\x49\x09\x37\x2e\x15\x51\x97\xc9\x9a\x64\x18\xb7\x43\xf8\xd5\x5a\xa4\x8b\xe1\x21\xea\xc3\x65\x53\x20\xcb\x74\xa0\xcb\x37\xb8\x72\xcf\xc2\x43\x27\x55\xc1\x25\x73\xad\x6f\xdd\xf2\x96\x1a\x2f\x9f\x37\x35\x48\x8b\xf0\x7d\x64\xcf\xc6\xe3\x41\x8d\xad\x76\x70\xed\x86\x43\x21\x00\xd6\x5b\x7d\xdc\x5c\x4a\xa3\x8d\xc2\x75\x68\xf0\x0e\x2a\x1f\xf8\x9a\x82\xaf\xcd\x38\x17\x62\xa1\xb0\x36\xca\x12\x63\x55\xa8\xe0\x6a\x4c\x7a\xb3\x31\xe6\x48\x52\x9e\x9b\x25\x56\x40\xdb\x81\xec\x3e\xa6\x41\xad\xe4\x07\x20\x49\x9a\x88\x5d\xa4\xab\x13\x6f\xfc\x3c\x4e\xaa\x02\x09\x49\x21\x53\x92\x43\xde\x6b\xb2\x47\xae\xaf\xb5\x06\x9a\x86\x2b\x0a\x9b\x85\xe9\xe5\x15\x68\x8d\xdb\xf2\xb4\xbf\x77\x0b\x55\xed\x9a\xdf\xb6\x76\x25\x56\x31\xb3\x99\x70\x2e\x09\x76\x47\x86\xf7\x48\x74\xbb\x12\x4b\x5d\x3d\x2e\x46\xc7\xcd\xe6\x25\x9e\x03\xd7\x53\x50\xd3\x20\xbc\x40\xcf\xc2\x00\x92\xd1\x6d\xbe\xa3\x71\xf3\x93\x1d\xbd\x6a\x7e\x46\x7e\x75\xa0\xa4\x75\x7d\x64\x67\x03\x6d\xe7\x54\x56\xd8\xc5\x86\xdb\xf3\xab\xe9\xec\xfa\xdd\xed\xf9\xec\x62\x9a\x0b\x56\xe7\x4c\x0e\x62\xbe\x9f\x10\xe2\xba\x93\x8e\xcd\xff\xa7\x47\x00\xe7\xcc\x05\x7a\x10\x04\x74\x97\x10\x2b\x2c\x70\x09\xb4\x9d\xcd\x66\x8d\xad\xfd\xef\x7e\xf6\xed\x9f\xbd\x5b\xaf\xb9\xdc\x7c\x23\x06\xd4\x8a\xad\xb1\x81\xdf\xb6\x66\x96\x38\x68\xe5\xca\x44\xbf\xdf\x34\xe1\x3e\x56\x44\xe2\x78\xfc\x0e\x87\xa7\xf1\x0c\x3a\xcc\x52\x9b\xb6\xa8\x37\x68\xea\xc2\x42\x33\x72\xfa\xda\xd3\xdf\x9d\x43\xed\xe9\xb1\xb4\xef\xfc\x9b\xd1\x43\x3b\xe9\x6e\xe7\x51\xdb\xed\x56\xa4\x0f\x57\xab\xf0\x7d\x44\x95\xbe\x10\xaf\x39\x2b\x97\x26\x3c\xcd\x76\x8e\x1e\x1b\xbf\x7e\x94\x59\x4b\x6e\xab\x64\x7a\x43\x37\x02\x57\x8c\xf8\xf8\xeb\xc2\x07\x13\x65\x28\x83\x68\xcc\x10\xff\x09\x00\x00\xff\xff\xd7\x36\xe9\x25\x83\x1b\x00\x00") func masterEtcOriginMasterMasterConfigYamlBytes() ([]byte, error) { return bindataRead( @@ -158,7 +158,7 @@ func masterEtcOriginMasterSchedulerJson() (*asset, error) { return a, nil } -var 
_masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x16\xa5\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x5a\x85\xda\x5a\x25\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x24\x15\xae\x79\xc9\x48\x0a\x00\x01\x00\x00\xff\xff\x58\x97\xb9\x86\x74\x00\x00\x00") +var _masterEtcOriginMasterSessionSecretsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe4\xca\xce\xcc\x4b\xb1\x52\x08\x4e\x2d\x06\x89\x04\xa7\x26\x17\xa5\x96\x14\x73\x15\x43\x68\x2b\x2e\x5d\x85\xc4\xd2\x92\x8c\xd4\xbc\x92\xcc\xe4\xc4\x12\xb0\x96\xea\x6a\x05\x3d\xc7\xd2\x92\x0c\x88\x52\x85\x1a\x85\xc2\xd2\xfc\x92\x54\x85\xda\x5a\x2e\x05\x85\xd4\xbc\xe4\xa2\xca\x02\x84\x3a\xd7\xbc\x64\x4c\x65\x80\x00\x00\x00\xff\xff\xc1\xc1\xc9\xa5\x80\x00\x00\x00") func masterEtcOriginMasterSessionSecretsYamlBytes() ([]byte, error) { return bindataRead( @@ -198,7 +198,7 @@ func masterTmpAnsibleAnsibleSh() (*asset, error) { return a, nil } -var _masterTmpAnsibleAzureLocalMasterInventoryYml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x6d\x6f\xe3\x36\x12\xfe\xee\x5f\x31\x50\x16\xf0\x1d\xb0\xb6\xb1\xbb\x05\xee\x2a\x60\x3f\xb8\xa9\xb6\x09\xda\xc4\x86\xed\x3d\xec\x5e\x51\xa8\x14\x35\xb2\x78\xa1\x38\x2a\x49\xd9\x71\x0d\xff\xf7\x03\x45\xca\x71\x94\x78\x93\x36\x9f\x22\x73\x5e\x1e\xce\x3c\xf3\xc2\xd1\x68\x34\xb8\x80\x4f\xd7\x5f\x6e\x92\x18\xa6\x6a\x67\x4b\xa1\xd6\x90\xa1\xa4\x2d\x08\x05\x4c\xca\x11\x67\xb5\x81\x92\x19\x10\x76\x68\x60\xc3\x64\x83\xa0\xb1\x96\x8c\x63\x0e\xd9\x0e\x0c\xe6\x4e\xd4\x96\x38\xb8\x80\xf0\x47\x35\x2a\x53\x8a\xc2\x56\xcc\x58\xd4\x86\x6b\x51\xdb\xb1\x29\x61\x5b\x0a\x5e\x82\x30\xa0\xc8\x82\xc8\x91\xc9\xb7\xb0\x45\x30\x25\x35\x32\x87\x42\xdc\x83\x2d\x99\x1d\x0f\x24\x71\x26\xbd\x72\x3c\x00\x28\xc9\x58\xe3\xfe\x01\x68\x4f\xdc\xf7\x00\x60\xc3\x74\xf8\x95\x29\x23\x32\x89\x29\x27\xa5\x90\x5b\x41\x2a\xf6\x92\x8f\x4e\xeb\x9d\x2d\x49\xa5\x42\x59\xd4\xb5\x46\x67\x1c\x26\x8d\xd1\x93\x4c\xa8\x89\x3f\x7c\x3f\x18\x3c\xba\x40\x2a\x8c\x33\x6a\x99\x50\xa8\xc5\x9f\x98\xc7\xf0\x89\x49\x83\x3d\x29\x2e\x05\x2a\x9b\x66\x42\x31\xbd\x3b\x31\x4a\xfc\x89\xbd\x8a\xad\x31\xb5\x6c\x1d\x43\xb4\xf9\x4f\xb2\x58\x5e\xcf\x6e\xa3\xbe\xd0\x16\x33\xe7\xd5\x90\xc4\xa0\xa0\x58\x85\x31\x44\xd7\x37\xd3\x9f\x92\x74\xbe\x48\x3e\x5d\x7f\x99\xf8\x8f\xd5\xd7\x79\x32\xda\x62\x36\x0a\x0a\x71\xdf\xaa\xc6\x75\xda\x68\x19\xc3\xf0\x9c\xf6\x9b\x3d\xa7\xaa\x26\x85\xca\x1e\xe2\x37\xfb\x0d\x6a\x23\x48\x1d\x86\x3d\x54\x3e\x1f\x69\x8e\x05\x6b\xa4\x4d\x4d\x93\xe5\x54\x31\xa1\x62\x18\xae\x92\x9b\xf9\x62\xf6\x79\x95\x2c\xae\xe7\x63\x25\xea\xb1\xa0\xa1\x77\x1f\xd8\x15\xfe\x5f\x95\xc2\x74\xd9\xce\x10\xec\xae\xc6\x8f\x42\x15\x9a\xbd\x85\xac\xb1\x8e\x0a\x25\xdb\x20\x58\x02\x29\x36\x08\x5b\x61\x4b\xd0\xb8\x16\xa4\xbc\x18\x14\xa4\x41\xd1\x36\x98\xcb\x90\xb3\xc6\x20\x50\x01\x12\xd7\x8c\xef\x40\x23\x33\xa4\x4c\x0f\xb9\xa6\xc6\x21\x37\x28\x91\x5b\xd2\x31\x0c\x4f\x8d\xf6\xef\xe9\xce\x8c\xd5\xbb\x6f\xcb\x5f\xc0\x2d\xb5\x14\x86\x6d\xb9\x03\xeb\x2e\x26\x0c\x30\xc8\x45\x51\xa0\x46\x65\x21\x67\x96\xb5
\x57\xf4\x97\x13\x16\x44\x1f\x58\xad\xa9\x42\x5b\x62\x63\x52\x45\x39\x9e\x78\xdc\x07\x97\xc3\x18\x86\xde\xeb\xa1\x0b\xe8\x12\x2d\x4c\xff\x6c\x34\x82\xa9\x91\x8b\x42\x70\xef\xca\xc5\xc6\x55\x06\xe6\xd0\x5d\xa1\xe7\xce\x9f\x9e\x5c\xd0\x92\x76\xec\xaa\x35\x6d\x44\xee\xca\x21\x62\xce\x70\x9a\x49\xca\xa2\x57\x2a\xdf\x09\x95\xc7\x10\x51\xf6\x3f\xe4\xf6\xb5\x4a\x0f\x6e\x52\xc6\x39\x35\xca\x7a\x86\x0f\x17\xc9\x4f\xd7\xcb\xd5\xe2\x6b\xba\x5c\xcd\x16\x8e\xa1\xd3\xff\x7e\x5e\x24\xe9\xf4\xf2\x72\xf6\xf9\x76\x75\x3b\xbd\x49\xfa\xe9\x7a\xbd\x8b\x3b\xdc\xbd\xe8\xe1\xe7\xe4\xeb\xdf\x70\x70\x6c\x11\x31\x44\x9d\xdc\xdf\x08\x85\x46\x26\xab\x18\x22\x4e\x1a\xc7\x5b\xa1\x72\xda\x9a\xb1\x42\x1b\x3d\x53\x4b\xe1\x97\x2b\xa6\x73\xe0\x94\xa3\xa7\x60\xa8\x8f\xf1\x23\x99\xcb\x46\x3b\x42\x4a\x47\x53\x04\x2e\x1b\x57\xc7\x60\x2c\xb3\x08\xcc\x42\x8e\xb5\xa4\x5d\xe5\x28\x6b\x45\x85\x90\x13\xfa\x06\xed\x6b\xb1\x44\x70\xdc\x34\xc1\x98\xc7\x8f\x1a\x73\x57\xa6\xee\xd4\xf7\x05\x5f\xac\x92\x65\x28\x0d\xb0\xba\x96\x02\x73\x60\xca\x71\x91\xe5\x3b\x27\x9b\x21\xfc\xd1\xa0\x16\x98\x07\x53\x6c\xcd\x84\x32\xd6\x61\x70\x76\x6a\x12\xca\xb6\xb3\xc4\xa1\xf0\x33\x25\x80\x6b\xc7\x82\x17\x92\x6c\x97\x11\xdd\x19\xd0\x8d\x1a\xc3\x54\x1a\x7a\x1b\xcc\xb9\x63\x5f\xe9\xed\x80\x12\x9c\x41\x9b\x76\x08\x1d\x0b\xa2\x82\x49\x69\x20\x63\xfc\x2e\x72\x80\xde\x39\x9c\x9a\x6a\x2d\x98\x45\xb9\x83\x6d\x89\x1a\x81\x99\x53\x7b\x21\x5b\x47\x8b\x92\xd6\xae\xde\x42\x88\xc6\xb0\x6a\x75\xb6\xcc\x00\x93\x86\x20\x17\x86\x37\xc6\xb5\x4f\x60\x19\xb9\xaa\x2f\x82\xb5\x76\x7c\x9e\xf8\x73\x00\x72\xf2\x59\x0b\x7d\xae\xf5\x60\xe0\xe3\x47\xf0\xcd\xae\x0d\xfb\xb1\xc9\x39\x03\xc1\x56\x8d\xba\x40\xee\x12\x5a\x20\x6b\x67\x9b\xb3\xd6\xa6\x8b\xa9\x13\xe5\x70\xff\x52\xac\x4b\xd4\x2e\x86\x5d\x4c\x8d\xd0\xc7\x2c\x74\x7e\x1f\x73\x66\x81\x1b\x61\x84\x05\xc9\x5c\x3c\xff\x51\x93\x71\x7e\x76\xae\xc1\x31\xac\x48\x19\xb4\x40\x1a\xde\x90\x2d\x51\xff\xf3\x0c\xd5\x7d\xdb\xed\x1c\xc4\xf0\xee\x85\x92\x38\x95\x3c\xdf\x29\x5b\xe2\xc6\x10\xd5\x1a\x0d\xaa\x27\x2d\xa7\xdf\x53\xf1
\xbe\x26\xed\x70\xf8\x49\x1a\xa6\x5b\x0c\xc3\xf9\x62\x76\x93\xac\xae\x92\xcf\xcb\x34\xf9\x32\x9f\x2d\x56\xc9\x22\x0d\xb3\x73\xf8\xcd\x89\xac\x8c\x65\x52\xc6\xb0\xd2\x0d\x7e\x03\xa7\xf7\x57\x6b\x2c\xc4\x7d\x7f\x76\xf7\x41\x3f\xd4\x60\xea\x26\x46\x0c\xc3\xe9\xed\xf2\xfa\x87\x5f\x92\xf4\xc7\x64\xfe\xcb\xec\x6b\x3b\xa8\x03\xaa\x6e\x9d\x31\xa8\x37\x82\x63\x9a\x69\xba\x73\xd7\x7b\x04\xeb\x5b\x82\x0e\x57\x0c\xb0\xdf\xff\x1e\xed\xf7\x20\x53\x66\xb2\xe3\x54\x6f\x0f\x4d\x9a\x0b\x6e\x7f\x3d\x0b\xef\x37\x10\xc5\x79\xf0\x8e\xc0\x43\xd2\x62\x2d\xd4\x10\x50\x1a\x0c\x2e\x7c\x3c\x1a\x2d\xe1\x70\x88\x7e\x3f\x1c\x7a\x21\xe8\x50\x72\x66\x99\xa4\x75\x07\xf3\xec\xce\x13\xe4\x47\x41\xfe\x64\xef\x79\x76\x6d\x41\xcb\x73\xe7\xdc\xc4\xf0\x6b\x54\x5a\x5b\x9b\x78\x32\xb9\x9a\x2d\xdb\x99\x12\xbf\xff\xf0\xaf\xef\xa3\xdf\x7c\x7c\x2d\x56\xb5\xa3\xfd\x99\xb8\x9d\x05\xd4\xe9\x1d\x91\x79\xbd\xfe\x42\x76\x01\xb7\xb3\x55\x12\xfb\x5d\x48\x18\xd0\x98\x37\x2a\x67\xca\xfa\x35\x41\xe3\x1f\x8d\x08\xdd\xb5\x64\x2a\x97\x08\x81\xb2\x60\xee\x70\x0b\x19\xda\x2d\xa2\x0a\xa6\x7a\xfb\xf6\x28\xa4\xdc\x75\xcf\x59\x9b\x81\xb6\x05\xcf\x2e\xe7\xbd\xa0\xe0\x3d\xab\x6a\x89\x7e\xb9\x75\x89\x3b\x96\x45\xb4\x59\x5e\xcd\x16\x2b\x57\x09\x4f\xf6\x52\x4e\xfc\xae\x16\x5d\xc2\x51\x3f\x90\xfb\x72\x76\xf9\xf3\xfc\x7a\x75\x8e\xde\x4f\x14\x33\x66\x30\xec\xb4\x9d\xea\x0f\xd3\x65\xe2\x72\xf1\xa2\xee\x03\xd4\x4e\xf5\xf9\x00\xff\x48\xed\x14\xcb\xb1\x10\x0a\x5f\xec\x4e\x60\xe8\x61\xca\x58\x66\xee\xa0\x10\x12\xbb\x1e\xd9\xca\x8e\x77\x95\x04\xa1\x8c\xc8\xfd\x58\xec\x9b\x04\x4d\x12\xa1\xd0\x54\x3d\x93\x91\xad\x90\xb2\x9b\x7b\x8d\xa5\x9a\xea\xc6\x91\xc5\xad\x85\x8d\x71\xaf\xae\xe7\x2c\xf6\x37\xd7\x36\x9b\x6e\x88\xee\x84\x5a\x9f\x8c\x29\xd5\x54\x19\x6a\xb7\x07\x9f\x4c\x8e\x3e\xdd\xd0\x3d\x98\x04\xcb\x64\x3b\x54\xdc\x13\x8f\x69\xc7\x14\x8b\x5a\x31\xe9\xf8\xf6\x14\xb5\xf3\xb7\xc5\xa1\xc6\x47\x7c\xf3\xfb\x62\x00\x5d\x41\x3b\x02\x2d\x01\xdb\x90\xc8\x5b\x3c\x42\x71\xb7\x73\xb8\xcd\xc4\x58\x07\xab\x60\xdc\x42\x21\x54\xde\xe1\x3e\xba\xf2\x0f\x38\x00\x4e
\x55\x45\xaa\xfb\x72\xdf\xaa\x10\xeb\x96\x28\x31\x4c\xd0\xf2\x89\x6f\x29\x93\xa3\xc4\x2b\x39\xec\x85\x1f\x1e\x93\xe1\x0a\x4d\x26\x05\x3f\xf6\xf4\xf6\x81\x74\x6c\x0b\xfb\x3d\x8c\x93\x7b\x1f\x97\x9b\x56\xf1\x8a\x4c\xbb\xa1\xc2\xe1\x10\xff\xfb\xbb\xef\x3e\x4c\x82\x62\xd4\x37\xc8\x6a\xf1\x97\x8d\x3d\x18\x79\x4d\x93\x82\x8b\xf0\x5e\xbf\x25\x90\xa4\xd6\xa8\x41\x21\xe6\x6e\xdb\x32\x2e\xd4\x4f\x92\x38\xfa\x30\xfe\x7e\xfc\xfe\xfd\xe8\x5d\x78\x63\x0d\x35\xb6\x85\x41\xca\x13\xbe\x6b\x2f\x3b\xb4\x01\x88\xa3\xcf\x43\xa8\xdc\x57\xd8\xce\x3b\x28\xc3\xc1\x80\x28\x74\x56\x93\x5a\x4a\x7d\xb6\xce\xbe\xd5\xff\x1f\x00\x00\xff\xff\xe7\x02\x45\x23\x68\x10\x00\x00") +var _masterTmpAnsibleAzureLocalMasterInventoryYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x6d\x6f\xdb\x38\x12\xfe\x9e\x5f\x31\x50\x0a\xf8\x0e\xa8\x6d\xb4\x5d\xe0\x6e\x05\xf4\x83\x37\xab\x36\xc1\x36\x71\x60\x3b\x87\xf6\x16\x0b\x2d\x25\x8d\x2c\x5e\x28\x8e\x4a\x52\x76\xbc\xb9\xfc\xf7\xc5\x88\x94\xe3\x28\x49\x1b\x34\x9f\x22\x73\x5e\x1e\xce\x3c\xf3\xc2\xf1\x78\x7c\x74\x0c\x1f\xce\x3e\x9f\x27\x31\xcc\xf4\xce\x55\x52\xaf\x21\x43\x45\x5b\x90\x1a\x84\x52\xe3\x5c\x34\x16\x2a\x61\x41\xba\x91\x85\x8d\x50\x2d\x82\xc1\x46\x89\x1c\x0b\xc8\x76\x60\xb1\x60\x51\x57\xe1\xd1\x31\x84\x3f\x6a\x50\xdb\x4a\x96\xae\x16\xd6\xa1\xb1\xb9\x91\x8d\x9b\xd8\x0a\xb6\x95\xcc\x2b\x90\x16\x34\x39\x90\x05\x0a\xf5\x1a\xb6\x08\xb6\xa2\x56\x15\x50\xca\x1b\x70\x95\x70\x93\x23\x45\xb9\x50\x5e\x39\x3e\x02\xa8\xc8\x3a\xcb\xff\x00\x74\x27\xfc\x7d\x04\xb0\x11\x26\xfc\x2a\xb4\x95\x99\xc2\x34\x27\xad\x31\x77\x92\x74\xec\x25\x1f\x9c\x36\x3b\x57\x91\x4e\xa5\x76\x68\x1a\x83\x6c\x1c\xa6\xad\x35\xd3\x4c\xea\xa9\x3f\x7c\x7b\x74\xf4\xe0\x02\xa9\xb4\x6c\xd4\x09\xa9\xd1\xc8\xbf\xb0\x88\xe1\x83\x50\x16\x07\x52\xb9\x92\xa8\x5d\x9a\x49\x2d\xcc\xee\xc0\x28\xe5\x8f\xec\xd5\x62\x8d\xa9\x13\xeb\x18\xa2\xcd\x7f\x92\xc5\xf2\x6c\x7e\x11\x0d\x85\xb6\x98\xb1\x57\x4b\x0a\x83\x82\x16\x35\xc6\x10\x9d\x9d\xcf\x3e\x26\xe9\xe5\x22\xf9\x70\xf6\x79\xea\x3f\x56\x5f\x2e\x93\xf1\x16\xb3
\x71\x50\x88\x87\x56\x0d\xae\xd3\xd6\xa8\x18\x46\xe7\xb3\xe5\x2a\x59\xa4\xf3\x45\xf2\x31\xbd\x5a\x7c\x1a\xbf\xba\xcd\xa9\x6e\x48\xa3\x76\x77\xf1\xab\xdb\x0d\x1a\x2b\x49\xdf\x8d\x06\x68\x7c\x1e\xd2\x02\x4b\xd1\x2a\x97\xda\x36\x2b\xa8\x16\x52\xc7\x30\x5a\x25\xe7\x97\x8b\xf9\xd5\x2a\x59\x9c\x5d\x4e\xb4\x6c\x26\x92\x46\xde\x6d\x60\x55\xf8\x7f\x55\x49\xdb\x67\x39\x43\x70\xbb\x06\xdf\x4b\x5d\x1a\xf1\x1a\xb2\xd6\x31\x05\x2a\xb1\x41\x70\x04\x4a\x6e\x10\xb6\xd2\x55\x60\x70\x2d\x49\x7b\x31\x28\xc9\x80\xa6\x6d\x30\x97\x61\x2e\x5a\x8b\x40\x25\x28\x5c\x8b\x7c\x07\x06\x85\x25\x6d\x07\xc8\x0d\xb5\x8c\xdc\xa2\xc2\xdc\x91\x89\x61\x74\x68\x74\x78\x4f\x3e\xb3\xce\xec\xbe\x2d\x7f\x0c\x17\xd4\x51\x17\xb6\xd5\x0e\x1c\x5f\x4c\x5a\x10\x50\xc8\xb2\x44\x83\xda\x41\x21\x9c\xe8\xae\xe8\x2f\x27\x1d\xc8\x21\xb0\xc6\x50\x8d\xae\xc2\xd6\xa6\x9a\x0a\x3c\xf0\x78\x1b\x5c\x8e\x62\x18\x79\xaf\x77\x7d\x40\x97\xe8\x60\xf6\x57\x6b\x10\x6c\x83\xb9\x2c\x65\xee\x5d\x71\x6c\xb8\x22\xb0\x80\xfe\x0a\x03\x77\xfe\xf4\xe0\x82\x8e\x0c\xb3\xaa\x31\xb4\x91\x05\x97\x41\x24\xd8\x70\x9a\x29\xca\xa2\x17\x2a\x5f\x4b\x5d\xc4\x10\x51\xf6\x3f\xcc\xdd\x4b\x95\xee\xdd\xa4\x22\xcf\xa9\xd5\xce\x33\x7b\xb4\x48\x3e\x9e\x2d\x57\x8b\x2f\xe9\x72\x35\x5f\x30\xaf\x67\xff\xbd\x5a\x24\xe9\xec\xe4\x64\x7e\x75\xb1\xba\x98\x9d\x27\xc3\x74\xbd\xdc\xc5\x35\xee\xbe\xeb\xe1\xb7\xe4\xcb\x0f\x38\xd8\xb7\x86\x18\xa2\x5e\xee\x07\x42\x61\x50\xa8\x3a\x86\x28\x27\x83\x93\xad\xd4\x05\x6d\xed\x44\xa3\x8b\x9e\xa8\xa5\xf0\xcb\xa9\x30\x05\xe4\x54\xa0\xa7\x60\xa8\x8f\xc9\x03\x99\x93\xd6\x30\x21\x15\xd3\x14\x21\x57\x2d\xd7\x31\x58\x27\x1c\x82\x70\x50\x60\xa3\x68\x57\x33\x65\x9d\xac\x11\x0a\x42\xdf\x98\x7d\x2d\x56\x08\xcc\x4d\x1b\x8c\x79\xfc\x68\xb0\xe0\x32\xe5\x53\xdf\x17\x7c\xb1\x2a\x91\xa1\xb2\x20\x9a\x46\x49\x2c\x40\x68\xe6\xa2\x28\x76\x2c\x9b\x21\x7c\x6d\xd1\x48\x2c\x82\x29\xb1\x16\x52\x5b\xc7\x18\xd8\x4e\x43\x52\xbb\x6e\x86\x30\x0a\x3f\x4b\x02\xb8\x6e\x1c\x78\x21\x25\x76\x19\xd1\xb5\x05\xd3\xea\x09\xcc\x94\xa5\xd7\xc1\x1c\x1f\xfb\x4a\xef\x06\x93\xcc\x05
\x74\x69\x87\xd0\xb1\x20\x2a\x85\x52\x16\x32\x91\x5f\x47\x0c\xe8\x0d\xe3\x34\xd4\x18\x29\x1c\xaa\x1d\x6c\x2b\x34\x08\xc2\x1e\xda\x0b\xd9\xda\x5b\x54\xb4\xe6\x7a\x0b\x21\x9a\xc0\xaa\xd3\xd9\x0a\x0b\x42\x59\x82\x42\xda\xbc\xb5\xdc\x3e\x41\x64\xc4\x55\x5f\x06\x6b\xdd\xd8\x3c\xf0\xc7\x00\x0a\xf2\x59\x0b\x7d\xae\xf3\x60\xe1\xfd\x7b\xf0\xcd\xae\x0b\xfb\xbe\xc9\xb1\x81\x60\xab\x41\x53\x62\xce\x09\x2d\x51\x74\x33\x8d\xad\x75\xe9\x12\xfa\x40\x39\xdc\xbf\x92\xeb\x0a\x0d\xc7\xb0\x8f\xa9\x95\x66\x9f\x85\xde\xef\x43\xce\x2c\x70\x23\xad\x74\xa0\x04\xc7\xf3\x1f\x0d\x59\xf6\xb3\xe3\x06\x27\xb0\x26\x6d\xd1\x01\x19\x78\x45\xae\x42\xf3\xcf\x67\xa8\xee\xdb\x6e\xef\x20\x86\x37\xdf\x29\x89\x43\xc9\xe7\x3b\x65\x47\xdc\x18\xa2\xc6\xa0\x45\xfd\xa8\xe5\x0c\x7b\x2a\xde\x34\x64\x18\x87\x9f\xa0\x61\xba\xc5\x30\xba\x5c\xcc\xcf\x93\xd5\x69\x72\xb5\x4c\x93\xcf\x97\xf3\x05\xcf\xc4\x30\x33\x47\xdf\x9c\xc4\xda\x3a\xa1\x54\x0c\x2b\xd3\xe2\x37\x70\x7a\x7f\x8d\xc1\x52\xde\x0c\x67\xf6\x10\xf4\x7d\x0d\xa6\x3c\x31\x62\x18\xcd\x2e\x96\x67\xbf\x7c\x4a\xd2\x5f\x93\xcb\x4f\xf3\x2f\xdd\x78\x0f\xa8\xfa\x35\xc6\xa2\xd9\xc8\x1c\xd3\xcc\xd0\x35\x5f\xef\x01\xac\x6f\x09\x32\xae\x18\xe0\xf6\xf6\xcf\xe8\xf6\x16\x54\x2a\x6c\xb6\x9f\xea\xdd\xa1\x4d\x0b\x99\xbb\xdf\x9f\x85\xf7\x07\xc8\xf2\x79\xf0\x4c\xe0\x11\x19\xb9\x96\x7a\x04\xa8\x2c\x06\x17\x3e\x1e\xad\x51\x70\x77\x17\xfd\x79\x77\x37\x08\x41\x8f\x32\x17\x4e\x28\x5a\xf7\x30\x9f\xdd\x75\x82\xfc\x38\xc8\x1f\xec\x3b\x4f\xae\x2d\xe8\xf2\x82\x9d\xdb\x18\x7e\x8f\x2a\xe7\x1a\x1b\x4f\xa7\xa7\xf3\x65\x37\x53\xe2\xb7\xef\xfe\xf5\x73\xf4\x87\x8f\xaf\xc3\xba\x61\xda\x3f\x13\xb7\x67\x01\xf5\x7a\x7b\x64\x5e\x6f\xb8\x88\x1d\xc3\xc5\x7c\x95\xc4\x7e\x17\x92\x16\x0c\x16\xad\x2e\x84\x76\x7e\x4d\x30\xf8\xb5\x95\xa1\xbb\x56\x42\x17\x0a\x21\x50\x16\xec\x35\x6e\x21\x43\xb7\x45\xd4\xc1\xd4\x60\xcf\x1e\x87\x94\x73\xf7\x9c\x77\x19\xe8\x5a\xf0\xfc\xe4\x72\x10\x14\xbc\x11\x75\xa3\xd0\x2f\xb5\x9c\xb8\x7d\x59\x44\x9b\xe5\xe9\x7c\xb1\xe2\x4a\x78\xb4\x8f\xe6\x94\x5f\x37\xb2\x4f\x38\x9a\x7b\x72\x9f\xcc
\x4f\x7e\xbb\x3c\x5b\x3d\x47\xef\x47\x8a\x99\xb0\x18\x76\xd9\x5e\xf5\x97\xd9\x32\xe1\x5c\x7c\x57\xf7\x1e\x6a\xaf\xfa\x74\x80\x7f\xa5\x6e\x8a\x15\x58\x4a\x8d\xdf\xed\x4e\x60\xe9\x7e\xca\x38\x61\xaf\xa1\x94\x0a\xfb\x1e\xd9\xc9\x4e\x76\xb5\x02\xa9\xad\x2c\xfc\x58\x1c\x9a\x04\x43\x0a\xa1\x34\x54\x3f\x91\x91\xad\x54\xaa\x9f\x7b\xad\xa3\x86\x9a\x96\xc9\xc2\x6b\x61\x6b\xf9\xb5\xf5\x94\xc5\xe1\xe6\xda\x65\x93\x87\xe8\x4e\xea\xf5\xc1\x98\xd2\x6d\x9d\xa1\xe1\x3d\xf8\x60\x72\x0c\xe9\x86\xfc\x50\x92\x22\x53\xdd\x50\xe1\xa7\x9d\x30\xcc\x14\x87\x46\x0b\xc5\x7c\x7b\x8c\x9a\xfd\x6d\x71\x64\xf0\x01\xdf\xfc\xbe\x18\x40\xd7\xd0\x8d\x40\x47\x20\x36\x24\x8b\x0e\x8f\xd4\x39\xef\x1c\xbc\x99\x58\xc7\xb0\x4a\x91\x3b\x28\xa5\x2e\x7a\xdc\x7b\x57\xfe\xe1\x06\x90\x53\x5d\x93\xee\xbf\xf8\x5b\x97\x72\xdd\x11\x25\x86\x29\xba\x7c\xea\x5b\xca\x74\x2f\xf1\x42\x0e\x7b\xe1\xfb\x47\x64\xb8\x42\x9b\x29\x99\xef\x7b\x7a\xf7\x30\xba\xbd\x85\xc6\xf0\x0e\xb2\xef\x0f\x11\x4c\x92\x1b\x1f\x9f\xf3\xce\xc0\x29\xd9\x6e\x53\x85\x28\xfe\xf7\x4f\x3f\xbd\x9b\x06\xfd\x08\xfe\x0f\x5f\x5b\x72\x08\xa1\xad\x1d\xb8\x10\x8d\xfc\x51\xf3\x4f\x9a\x7d\x49\x23\x83\xe3\xf0\x96\xbf\x20\x50\xa4\xd7\x68\x40\x23\x16\xbc\x91\x59\x4e\xc7\xa3\x44\x8f\xdf\x4d\x7e\x9e\xbc\x7d\x3b\x7e\x13\xde\x61\x23\x83\x5d\xf1\x90\xf6\x45\xd1\xb7\xa0\x1d\xba\x00\x84\x29\x76\x1f\x4e\xfe\x0a\x1b\x7c\x0f\x65\x74\x74\x44\x14\xba\xaf\x4d\x1d\xa5\x3e\xa3\xcf\xbe\xe3\xff\x0e\x00\x00\xff\xff\xea\xca\x5c\x16\x84\x10\x00\x00") func masterTmpAnsibleAzureLocalMasterInventoryYmlBytes() ([]byte, error) { return bindataRead( @@ -298,7 +298,7 @@ func masterTmpBootstrapconfigsMasterConfigYaml() (*asset, error) { return a, nil } -var _nodeEtcOriginCloudproviderAzureConf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2a\x49\xcd\x4b\xcc\x2b\xf1\x4c\xb1\x52\xa8\xae\x56\xd0\x73\xac\x2a\x2d\x4a\x75\xce\xcf\x4b\xcb\x4c\xd7\x0b\x81\xc8\xb8\x28\xd4\xd6\x72\x15\x97\x26\x15\x27\x17\x65\x16\x94\x64\xe6\xe7\x61\x53\x1b\x8c\x2c\x0f\xd6\x91\x98\x98\xe2\x9c\x93\x99\x8a\xdd\x68\x47\x47\x17\xa8\x24\xaa\xda\xe0\xd4\xe4\xa2\xd4\x12\x3c\xea\x21\x0a\xa0\x7a\x42\x88\x72\x7a\x51\x6a\x71\x7e\x69\x51\x72\xaa\x7b\x51\x7e\x69\x01\xa6\xd2\x20\x64\x69\x90\xfa\x9c\xfc\xe4\x44\x90\x37\x30\x95\xfa\x40\x65\x40\xaa\x00\x01\x00\x00\xff\xff\x69\xfe\xce\x7d\x37\x01\x00\x00") +var _nodeEtcOriginCloudproviderAzureConf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x90\x51\x8a\x83\x30\x10\x86\xdf\xf7\x14\x39\x81\x07\xd8\xb7\xa0\xb0\x2c\x2c\xcb\xb2\x7a\x81\x31\x4e\xcb\x40\x4c\xec\x38\x29\x58\xeb\xdd\x4b\xab\x0f\x51\xd3\x42\x9f\xbf\xff\xfb\x87\xf9\x05\x1d\x38\xf9\x6e\x3e\xd5\x38\xaa\x4c\x5f\x02\x63\xee\xdd\x81\x8e\x59\x35\x93\x42\x5d\xd5\x29\x78\x41\x35\x4d\x1f\x7d\xa8\x7b\xc3\xd4\x09\x79\x97\x72\xca\x98\xaf\x4c\x80\x26\xb7\x84\xe9\x53\x5a\x17\x0b\x4c\x3b\x25\x1a\x46\x79\xe1\xcd\x81\x8d\x5b\xbd\xf5\x1a\x63\xef\x03\x1b\xfc\x62\x1f\xba\xbd\xf2\x1f\xe3\xd8\xb3\xde\xc0\xfd\xdd\xbd\xf2\xb3\x90\xd5\x80\x68\x02\x93\x0c\x8f\x9a\x5f\x68\x31\xb1\xe1\x36\x12\xfb\x1d\x53\x0b\x3c\xe8\x33\x90\x85\x9a\x2c\xc9\x50\xa2\xa4\x8b\xfe\x9e\x66\xe3\xc6\x5b\x00\x00\x00\xff\xff\x34\x7d\xff\xaa\x01\x02\x00\x00") func nodeEtcOriginCloudproviderAzureConfBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf b/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf index 4762d60b51..c148e25cc3 100644 --- a/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf +++ b/pkg/openshift/certgen/unstable/templates/master/etc/etcd/etcd.conf @@ -1,26 +1,26 @@ -ETCD_NAME={{ .Master.Hostname }} -ETCD_LISTEN_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_NAME={{ .Master.Hostname | shellQuote 
}} +ETCD_LISTEN_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_DATA_DIR=/var/lib/etcd/ #ETCD_WAL_DIR="" #ETCD_SNAPSHOT_COUNT=10000 ETCD_HEARTBEAT_INTERVAL=500 ETCD_ELECTION_TIMEOUT=2500 -ETCD_LISTEN_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 +ETCD_LISTEN_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_MAX_SNAPSHOTS=5 #ETCD_MAX_WALS=5 #ETCD_CORS= #[cluster] -ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{ (index .Master.IPs 0).String }}:2380 -ETCD_INITIAL_CLUSTER={{ .Master.Hostname }}=https://{{ (index .Master.IPs 0).String }}:2380 +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ print "https://" (index .Master.IPs 0).String ":2380" | shellQuote }} +ETCD_INITIAL_CLUSTER={{ print .Master.Hostname "=https://" (index .Master.IPs 0).String ":2380" | shellQuote }} ETCD_INITIAL_CLUSTER_STATE=new ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster-1 #ETCD_DISCOVERY= #ETCD_DISCOVERY_SRV= #ETCD_DISCOVERY_FALLBACK=proxy #ETCD_DISCOVERY_PROXY= -ETCD_ADVERTISE_CLIENT_URLS=https://{{ (index .Master.IPs 0).String }}:2379 +ETCD_ADVERTISE_CLIENT_URLS={{ print "https://" (index .Master.IPs 0).String ":2379" | shellQuote }} #ETCD_STRICT_RECONFIG_CHECK="false" #ETCD_AUTO_COMPACTION_RETENTION="0" #ETCD_ENABLE_V2="true" diff --git a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml index a4be4587c6..fdac6afb98 100644 --- a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml +++ b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/master-config.yaml @@ -60,13 +60,13 @@ controllers: '*' corsAllowedOrigins: - (?i)//127\.0\.0\.1(:|\z) - (?i)//localhost(:|\z) -- (?i)//{{ QuoteMeta (index .Master.IPs 0).String }}(:|\z) +- {{ print "(?i)//" (QuoteMeta (index .Master.IPs 0).String) "(:|\\z)" | quote }} - (?i)//kubernetes\.default(:|\z) - 
(?i)//kubernetes\.default\.svc\.cluster\.local(:|\z) - (?i)//kubernetes(:|\z) -- (?i)//{{ QuoteMeta .ExternalMasterHostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .ExternalMasterHostname) "(:|\\z)" | quote }} - (?i)//openshift\.default(:|\z) -- (?i)//{{ QuoteMeta .Master.Hostname }}(:|\z) +- {{ print "(?i)//" (QuoteMeta .Master.Hostname) "(:|\\z)" | quote }} - (?i)//openshift\.default\.svc(:|\z) - (?i)//kubernetes\.default\.svc(:|\z) - (?i)//172\.30\.0\.1(:|\z) @@ -80,14 +80,14 @@ etcdClientInfo: certFile: master.etcd-client.crt keyFile: master.etcd-client.key urls: - - https://{{ .Master.Hostname }}:2379 + - {{ print "https://" .Master.Hostname ":2379" | quote }} etcdStorageConfig: kubernetesStoragePrefix: kubernetes.io kubernetesStorageVersion: v1 openShiftStoragePrefix: openshift.io openShiftStorageVersion: v1 imageConfig: - format: IMAGE_PREFIX/IMAGE_TYPE-${component}:${version} + format: MASTER_OREG_URL-${component}:${version} latest: false imagePolicyConfig: internalRegistryHostname: docker-registry.default.svc:5000 @@ -119,7 +119,7 @@ kubernetesMasterConfig: cloud-config: - "/etc/origin/cloudprovider/azure.conf" masterCount: 1 - masterIP: {{ (index .Master.IPs 0).String }} + masterIP: {{ (index .Master.IPs 0).String | quote }} podEvictionTimeout: null proxyClientInfo: certFile: master.proxy-client.crt @@ -142,7 +142,7 @@ masterClients: contentType: application/vnd.kubernetes.protobuf qps: 300 openshiftLoopbackKubeConfig: openshift-master.kubeconfig -masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} +masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} networkConfig: clusterNetworkCIDR: 10.128.0.0/14 clusterNetworks: @@ -154,7 +154,7 @@ networkConfig: networkPluginName: redhat/openshift-ovs-subnet serviceNetworkCIDR: 172.30.0.0/16 oauthConfig: - assetPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }}/console/ + assetPublicURL: {{ print "https://" .ExternalMasterHostname ":" 
.Master.Port "/console/" | quote }} grantConfig: method: auto identityProviders: @@ -166,8 +166,8 @@ oauthConfig: provider: apiVersion: v1 kind: OpenIDIdentityProvider - clientID: {{ .AzureConfig.AADClientID }} - clientSecret: {{ .AzureConfig.AADClientSecret }} + clientID: {{ .AzureConfig.AADClientID | quote }} + clientSecret: {{ .AzureConfig.AADClientSecret | quote }} claims: id: - sub @@ -178,8 +178,8 @@ oauthConfig: email: - email urls: - authorize: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/authorize - token: https://login.microsoftonline.com/{{ .AzureConfig.TenantID }}/oauth2/token + authorize: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/authorize" | quote }} + token: {{ print "https://login.microsoftonline.com/" .AzureConfig.TenantID "/oauth2/token" | quote }} {{- end}} - name: Local password challenge: true @@ -190,8 +190,8 @@ oauthConfig: file: /etc/origin/master/htpasswd kind: HTPasswdPasswordIdentityProvider masterCA: ca-bundle.crt - masterPublicURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} - masterURL: https://{{ .ExternalMasterHostname }}:{{ .Master.Port }} + masterPublicURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} + masterURL: {{ print "https://" .ExternalMasterHostname ":" .Master.Port | quote }} sessionConfig: sessionMaxAgeSeconds: 3600 sessionName: ssn @@ -225,7 +225,7 @@ serviceAccountConfig: publicKeyFiles: - serviceaccounts.public.key servingInfo: - bindAddress: 0.0.0.0:{{ .Master.Port }} + bindAddress: {{ print "0.0.0.0:" .Master.Port | quote }} bindNetwork: tcp4 certFile: master.server.crt clientCA: ca.crt diff --git a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml index e958ea1d0f..d440e83292 100644 --- a/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml +++ 
b/pkg/openshift/certgen/unstable/templates/master/etc/origin/master/session-secrets.yaml @@ -1,5 +1,5 @@ apiVersion: v1 kind: SessionSecrets secrets: -- authentication: "{{ .AuthSecret }}" - encryption: "{{ .EncSecret }}" +- authentication: {{ .AuthSecret | quote }} + encryption: {{ .EncSecret | quote }} diff --git a/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml b/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml index 9fb2fd9342..dde741302e 100644 --- a/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml +++ b/pkg/openshift/certgen/unstable/templates/master/tmp/ansible/azure-local-master-inventory.yml @@ -15,7 +15,7 @@ localmaster: openshift_web_console_image_name: "IMAGE_PREFIX/IMAGE_TYPE-web-console:vVERSION" - oreg_url: 'IMAGE_PREFIX/IMAGE_TYPE-${component}:${version}' + oreg_url: 'MASTER_OREG_URL-${component}:${version}' openshift_master_default_subdomain: 'TEMPROUTERIP.nip.io' # FIXME @@ -86,8 +86,8 @@ localmaster: config_base: /etc/origin/ examples_content_version: "vSHORT_VER" master: - public_console_url: "https://{{ .ExternalMasterHostname }}:8443/console" - public_api_url: "https://{{ .ExternalMasterHostname }}:8443" + public_console_url: {{ print "https://" .ExternalMasterHostname ":8443/console" | quote }} + public_api_url: {{ print "https://" .ExternalMasterHostname ":8443" | quote }} etcd_urls: ["https://HOSTNAME:2379"] #FIXME: No longer needed as of openshift-ansible-3.9.22-1 but we're not on that version yet node: nodename: 'HOSTNAME' diff --git a/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf b/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf index 5241e2afa2..870b2aad1d 100644 --- a/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf +++ b/pkg/openshift/certgen/unstable/templates/node/etc/origin/cloudprovider/azure.conf 
@@ -1,7 +1,9 @@ -tenantId: {{ .AzureConfig.TenantID }} -subscriptionId: {{ .AzureConfig.SubscriptionID }} -aadClientId: {{ .AzureConfig.AADClientID }} -aadClientSecret: {{ .AzureConfig.AADClientSecret }} -aadTenantId: {{ .AzureConfig.TenantID }} -resourceGroup: {{ .AzureConfig.ResourceGroup }} -location: {{ .AzureConfig.Location }} +tenantId: {{ .AzureConfig.TenantID | quote }} +subscriptionId: {{ .AzureConfig.SubscriptionID | quote }} +aadClientId: {{ .AzureConfig.AADClientID | quote }} +aadClientSecret: {{ .AzureConfig.AADClientSecret | quote }} +aadTenantId: {{ .AzureConfig.TenantID | quote }} +resourceGroup: {{ .AzureConfig.ResourceGroup | quote }} +location: {{ .AzureConfig.Location | quote }} +securityGroupName: {{ .AzureConfig.SecurityGroupName | quote }} +primaryAvailabilitySetName: {{ .AzureConfig.PrimaryAvailabilitySetName | quote }} diff --git a/pkg/operations/cordondrainvm.go b/pkg/operations/cordondrainvm.go index 4a58db9b32..d37116003d 100644 --- a/pkg/operations/cordondrainvm.go +++ b/pkg/operations/cordondrainvm.go @@ -1,11 +1,11 @@ package operations import ( - "fmt" "strings" "time" "github.com/Azure/acs-engine/pkg/armhelpers" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -177,7 +177,7 @@ func (o *drainOperation) evictPods(pods []v1.Pod, policyGroupVersion string) err } else if apierrors.IsTooManyRequests(err) { time.Sleep(5 * time.Second) } else { - errCh <- fmt.Errorf("error when evicting pod %q: %v", pod.Name, err) + errCh <- errors.Wrapf(err, "error when evicting pod %q", pod.Name) return } } @@ -186,7 +186,7 @@ func (o *drainOperation) evictPods(pods []v1.Pod, policyGroupVersion string) err if err == nil { doneCh <- true } else { - errCh <- fmt.Errorf("error when waiting for pod %q terminating: %v", pod.Name, err) + errCh <- errors.Wrapf(err, "error when waiting for pod %q terminating", pod.Name) } }(pod, doneCh, errCh) } @@ -202,7 
+202,7 @@ func (o *drainOperation) evictPods(pods []v1.Pod, policyGroupVersion string) err return nil } case <-time.After(o.timeout): - return fmt.Errorf("Drain did not complete within %v", o.timeout) + return errors.Errorf("Drain did not complete within %v", o.timeout) } } } diff --git a/pkg/operations/deletevm.go b/pkg/operations/deletevm.go index e7be1cfa14..70893d1a14 100644 --- a/pkg/operations/deletevm.go +++ b/pkg/operations/deletevm.go @@ -5,6 +5,7 @@ import ( "github.com/Azure/acs-engine/pkg/armhelpers" "github.com/Azure/acs-engine/pkg/armhelpers/utils" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -27,7 +28,7 @@ func CleanDeleteVirtualMachine(az armhelpers.ACSEngineClient, logger *log.Entry, if vhd == nil && managedDisk == nil { logger.Errorf("failed to get a valid os disk URI for VM: %s/%s", resourceGroup, name) - return fmt.Errorf("os disk does not have a VHD URI") + return errors.New("os disk does not have a VHD URI") } osDiskName := vm.VirtualMachineProperties.StorageProfile.OsDisk.Name diff --git a/pkg/operations/kubernetesupgrade/upgradecluster.go b/pkg/operations/kubernetesupgrade/upgradecluster.go index fc85767268..6af732f3cb 100644 --- a/pkg/operations/kubernetesupgrade/upgradecluster.go +++ b/pkg/operations/kubernetesupgrade/upgradecluster.go @@ -123,6 +123,11 @@ func (uc *UpgradeCluster) UpgradeCluster(subscriptionID uuid.UUID, kubeConfig, r upgrader110.Init(uc.Translator, uc.Logger, uc.ClusterTopology, uc.Client, kubeConfig, uc.StepTimeout, acsengineVersion) upgrader = upgrader110 + case strings.HasPrefix(upgradeVersion, "1.11."): + upgrader111 := &Upgrader{} + upgrader111.Init(uc.Translator, uc.Logger, uc.ClusterTopology, uc.Client, kubeConfig, uc.StepTimeout, acsengineVersion) + upgrader = upgrader111 + default: return uc.Translator.Errorf("Upgrade to Kubernetes version %s is not supported", upgradeVersion) } diff --git a/pkg/operations/kubernetesupgrade/upgradecluster_test.go 
b/pkg/operations/kubernetesupgrade/upgradecluster_test.go index 26f054a7d0..427462b8cd 100644 --- a/pkg/operations/kubernetesupgrade/upgradecluster_test.go +++ b/pkg/operations/kubernetesupgrade/upgradecluster_test.go @@ -4,6 +4,7 @@ import ( "os" "testing" + "github.com/Azure/acs-engine/pkg/acsengine" "github.com/Azure/acs-engine/pkg/api" "github.com/Azure/acs-engine/pkg/armhelpers" "github.com/Azure/acs-engine/pkg/i18n" @@ -28,7 +29,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to list VMs during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 1, 1) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 1, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" @@ -51,8 +52,8 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { os.RemoveAll("./translations") }) - It("Should return error message when failing to detete VMs during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 1, 1) + It("Should return error message when failing to delete VMs during upgrade operation", func() { + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 1, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ @@ -72,7 +73,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to deploy template during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.13", 1, 1) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.13", 1, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.6.13" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -91,7 +92,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to get a virtual machine during upgrade 
operation", func() { - cs := createContainerService("testcluster", "1.6.9", 1, 6) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 1, 6, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -110,7 +111,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to get storage client during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 5, 1) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 5, 1, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -129,7 +130,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to delete network interface during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 2) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 2, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -148,7 +149,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing on ClusterPreflightCheck operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 3) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 3, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.8.6" uc := UpgradeCluster{ Translator: &i18n.Translator{}, @@ -166,7 +167,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should return error message when failing to delete role assignment during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 2) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 2, false) 
cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{} cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity = true @@ -188,7 +189,7 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { }) It("Should not fail if no managed identity is returned by azure during upgrade operation", func() { - cs := createContainerService("testcluster", "1.6.9", 3, 2) + cs := acsengine.CreateMockContainerService("testcluster", "1.6.9", 3, 2, false) cs.Properties.OrchestratorProfile.OrchestratorVersion = "1.7.14" cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{} cs.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity = true @@ -206,54 +207,3 @@ var _ = Describe("Upgrade Kubernetes cluster tests", func() { Expect(err).To(BeNil()) }) }) - -func createContainerService(containerServiceName string, orchestratorVersion string, masterCount int, agentCount int) *api.ContainerService { - cs := api.ContainerService{} - cs.ID = uuid.NewV4().String() - cs.Location = "eastus" - cs.Name = containerServiceName - - cs.Properties = &api.Properties{} - - cs.Properties.MasterProfile = &api.MasterProfile{} - cs.Properties.MasterProfile.Count = masterCount - cs.Properties.MasterProfile.DNSPrefix = "testmaster" - cs.Properties.MasterProfile.VMSize = "Standard_D2_v2" - - cs.Properties.AgentPoolProfiles = []*api.AgentPoolProfile{} - agentPool := &api.AgentPoolProfile{} - agentPool.Count = agentCount - agentPool.Name = "agentpool1" - agentPool.VMSize = "Standard_D2_v2" - agentPool.OSType = "Linux" - agentPool.AvailabilityProfile = "AvailabilitySet" - agentPool.StorageProfile = "StorageAccount" - - cs.Properties.AgentPoolProfiles = append(cs.Properties.AgentPoolProfiles, agentPool) - - cs.Properties.LinuxProfile = &api.LinuxProfile{ - AdminUsername: "azureuser", - SSH: struct { - PublicKeys []api.PublicKey `json:"publicKeys"` - }{}, - } - - 
cs.Properties.LinuxProfile.AdminUsername = "azureuser" - cs.Properties.LinuxProfile.SSH.PublicKeys = append( - cs.Properties.LinuxProfile.SSH.PublicKeys, api.PublicKey{KeyData: "test"}) - - cs.Properties.ServicePrincipalProfile = &api.ServicePrincipalProfile{} - cs.Properties.ServicePrincipalProfile.ClientID = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - cs.Properties.ServicePrincipalProfile.Secret = "DEC923E3-1EF1-4745-9516-37906D56DEC4" - - cs.Properties.OrchestratorProfile = &api.OrchestratorProfile{} - cs.Properties.OrchestratorProfile.OrchestratorType = api.Kubernetes - cs.Properties.OrchestratorProfile.OrchestratorVersion = orchestratorVersion - - cs.Properties.CertificateProfile = &api.CertificateProfile{} - cs.Properties.CertificateProfile.CaCertificate = "cacert" - cs.Properties.CertificateProfile.KubeConfigCertificate = "kubeconfigcert" - cs.Properties.CertificateProfile.KubeConfigPrivateKey = "kubeconfigkey" - - return &cs -} diff --git a/test/e2e/cleanup.sh b/test/e2e/cleanup.sh index 8d875f02be..1409d9e7ab 100755 --- a/test/e2e/cleanup.sh +++ b/test/e2e/cleanup.sh @@ -26,8 +26,6 @@ if [ -z "$EXPIRATION_IN_HOURS" ]; then EXPIRATION_IN_HOURS=2 fi -set -eu -o pipefail - az login --service-principal \ --username "${SERVICE_PRINCIPAL_CLIENT_ID}" \ --password "${SERVICE_PRINCIPAL_CLIENT_SECRET}" \ @@ -42,7 +40,7 @@ az account set -s $SUBSCRIPTION_ID_TO_CLEANUP (( deadline=$(date +%s)-${expirationInSecs%.*} )) # find resource groups created before our deadline echo "Looking for resource groups created over ${EXPIRATION_IN_HOURS} hours ago..." 
-for resourceGroup in `az group list | jq --arg dl $deadline '.[] | select(.id | contains("acse-test-infrastructure") | not) | select(.tags.now < $dl).name' | tr -d '\"' || ""`; do +for resourceGroup in `az group list | jq --arg dl $deadline '.[] | select(.name | startswith("acse-") | not) | select(.tags.now < $dl).name' | tr -d '\"' || ""`; do for deployment in `az group deployment list -g $resourceGroup | jq '.[] | .name' | tr -d '\"' || ""`; do echo "Will delete deployment ${deployment} from resource group ${resourceGroup}..." az group deployment delete -n $deployment -g $resourceGroup || echo "unable to delete deployment ${deployment}, will continue..." diff --git a/test/e2e/engine/template.go b/test/e2e/engine/template.go index e293c2d83c..bd9dd3c2db 100644 --- a/test/e2e/engine/template.go +++ b/test/e2e/engine/template.go @@ -227,7 +227,7 @@ func (e *Engine) HasGPUNodes() bool { func (e *Engine) HasAddon(name string) (bool, api.KubernetesAddon) { for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { if addon.Name == name { - return *addon.Enabled, addon + return helpers.IsTrueBoolPointer(addon.Enabled), addon } } return false, api.KubernetesAddon{} diff --git a/test/e2e/kubernetes/deployment/deployment.go b/test/e2e/kubernetes/deployment/deployment.go index daf69b7047..87bd7fb6d8 100644 --- a/test/e2e/kubernetes/deployment/deployment.go +++ b/test/e2e/kubernetes/deployment/deployment.go @@ -1,6 +1,7 @@ package deployment import ( + "context" "encoding/json" "fmt" "log" @@ -66,8 +67,7 @@ func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, er } else { cmd = exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--overrides", overrides) } - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ 
-85,8 +85,7 @@ func CreateLinuxDeploy(image, name, namespace, miscOpts string) (*Deployment, er func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Deployment, error) { overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}` cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--replicas", strconv.Itoa(replicas), "--overrides", overrides, "--command", "--", "/bin/sh", "-c", command) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -103,8 +102,7 @@ func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Depl func CreateWindowsDeploy(image, name, namespace string, port int, hostport int) (*Deployment, error) { overrides := `{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"windows"}}}}}` cmd := exec.Command("kubectl", "run", name, "-n", namespace, "--image", image, "--port", strconv.Itoa(port), "--hostport", strconv.Itoa(hostport), "--overrides", overrides) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out)) return nil, err @@ -120,8 +118,7 @@ func CreateWindowsDeploy(image, name, namespace string, port int, hostport int) // Get returns a deployment from a name and namespace func Get(name, namespace string) (*Deployment, error) { cmd := exec.Command("kubectl", "get", "deploy", "-o", "json", "-n", namespace, name) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while trying to fetch deployment %s in namespace %s:%s\n", name, 
namespace, string(out)) return nil, err @@ -138,8 +135,7 @@ func Get(name, namespace string) (*Deployment, error) { // Delete will delete a deployment in a given namespace func (d *Deployment) Delete() error { cmd := exec.Command("kubectl", "delete", "deploy", "-n", d.Metadata.Namespace, d.Metadata.Name) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while trying to delete deployment %s in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(out)) return err @@ -147,8 +143,7 @@ func (d *Deployment) Delete() error { // Delete any associated HPAs if d.Metadata.HasHPA { cmd := exec.Command("kubectl", "delete", "hpa", "-n", d.Metadata.Namespace, d.Metadata.Name) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Deployment %s has associated HPA but unable to delete in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(out)) return err @@ -160,8 +155,7 @@ func (d *Deployment) Delete() error { // Expose will create a load balancer and expose the deployment on a given port func (d *Deployment) Expose(svcType string, targetPort, exposedPort int) error { cmd := exec.Command("kubectl", "expose", "deployment", d.Metadata.Name, "--type", svcType, "-n", d.Metadata.Namespace, "--target-port", strconv.Itoa(targetPort), "--port", strconv.Itoa(exposedPort)) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while trying to expose (%s) target port (%v) for deployment %s in namespace %s on port %v:%s\n", svcType, targetPort, d.Metadata.Name, d.Metadata.Namespace, exposedPort, string(out)) return err @@ -173,8 +167,7 @@ func (d *Deployment) Expose(svcType string, targetPort, exposedPort int) error { func (d *Deployment) CreateDeploymentHPA(cpuPercent, min, max int) error { cmd := exec.Command("kubectl", 
"autoscale", "deployment", d.Metadata.Name, fmt.Sprintf("--cpu-percent=%d", cpuPercent), fmt.Sprintf("--min=%d", min), fmt.Sprintf("--max=%d", max)) - util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err := util.RunAndLogCommand(cmd) if err != nil { log.Printf("Error while configuring autoscale against deployment %s:%s\n", d.Metadata.Name, string(out)) return err @@ -187,3 +180,39 @@ func (d *Deployment) CreateDeploymentHPA(cpuPercent, min, max int) error { func (d *Deployment) Pods() ([]pod.Pod, error) { return pod.GetAllByPrefix(d.Metadata.Name, d.Metadata.Namespace) } + +// WaitForReplicas waits for a minimum of n pod replicas +func (d *Deployment) WaitForReplicas(n int, sleep, duration time.Duration) ([]pod.Pod, error) { + readyCh := make(chan bool, 1) + errCh := make(chan error) + ctx, cancel := context.WithTimeout(context.Background(), duration) + var pods []pod.Pod + defer cancel() + go func() { + for { + select { + case <-ctx.Done(): + errCh <- fmt.Errorf("Timeout exceeded (%s) while waiting for %d Pod replicas from Deployment %s", duration.String(), n, d.Metadata.Name) + default: + pods, err := pod.GetAllByPrefix(d.Metadata.Name, d.Metadata.Namespace) + if err != nil { + errCh <- err + return + } + if len(pods) >= n { + readyCh <- true + } else { + time.Sleep(sleep) + } + } + } + }() + for { + select { + case err := <-errCh: + return pods, err + case _ = <-readyCh: + return pods, nil + } + } +} diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go index 9410fd4f18..608491d5a0 100644 --- a/test/e2e/kubernetes/kubernetes_test.go +++ b/test/e2e/kubernetes/kubernetes_test.go @@ -8,7 +8,6 @@ import ( "os/exec" "path/filepath" "regexp" - "strings" "time" "github.com/Azure/acs-engine/pkg/api/common" @@ -18,8 +17,10 @@ import ( "github.com/Azure/acs-engine/test/e2e/kubernetes/job" "github.com/Azure/acs-engine/test/e2e/kubernetes/networkpolicy" 
"github.com/Azure/acs-engine/test/e2e/kubernetes/node" + "github.com/Azure/acs-engine/test/e2e/kubernetes/persistentvolumeclaims" "github.com/Azure/acs-engine/test/e2e/kubernetes/pod" "github.com/Azure/acs-engine/test/e2e/kubernetes/service" + "github.com/Azure/acs-engine/test/e2e/kubernetes/storageclass" "github.com/Azure/acs-engine/test/e2e/kubernetes/util" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -66,6 +67,23 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu It("should have functional DNS", func() { if !eng.HasWindowsAgents() { + if !eng.HasNetworkPolicy("calico") { + var err error + var p *pod.Pod + p, err = pod.CreatePodFromFile(filepath.Join(WorkloadDir, "dns-liveness.yaml"), "dns-liveness", "default") + if cfg.SoakClusterName == "" { + Expect(err).NotTo(HaveOccurred()) + } else { + if err != nil { + p, err = pod.Get("dns-liveness", "default") + Expect(err).NotTo(HaveOccurred()) + } + } + running, err := p.WaitOnReady(5*time.Second, 2*time.Minute) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + } + kubeConfig, err := GetConfig() Expect(err).NotTo(HaveOccurred()) master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) @@ -157,30 +175,6 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } }) - It("should be running the expected version", func() { - hasWindows := eng.HasWindowsAgents() - version, err := node.Version() - Expect(err).NotTo(HaveOccurred()) - - var expectedVersion string - if eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorRelease != "" || - eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorVersion != "" { - expectedVersion = common.RationalizeReleaseAndVersion( - common.Kubernetes, - eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorRelease, - eng.ClusterDefinition.Properties.OrchestratorProfile.OrchestratorVersion, - 
hasWindows) - } else { - expectedVersion = common.RationalizeReleaseAndVersion( - common.Kubernetes, - eng.Config.OrchestratorRelease, - eng.Config.OrchestratorVersion, - hasWindows) - } - expectedVersionRationalized := strings.Split(expectedVersion, "-")[0] // to account for -alpha and -beta suffixes - Expect(version).To(Equal("v" + expectedVersionRationalized)) - }) - It("should have kube-dns running", func() { running, err := pod.WaitOnReady("kube-dns", "kube-system", 3, 30*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) @@ -351,6 +345,85 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } }) + It("should have cluster-omsagent daemonset running", func() { + if hasContainerMonitoring, clusterContainerMonitoringAddon := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the correct resources have been applied") + pods, err := pod.GetAllByPrefix("omsagent-", "kube-system") + Expect(err).NotTo(HaveOccurred()) + for i, c := range clusterContainerMonitoringAddon.Containers { + err := pods[0].Spec.Containers[i].ValidateResources(c) + Expect(err).NotTo(HaveOccurred()) + } + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should have cluster-omsagent replicaset running", func() { + if hasContainerMonitoring, clusterContainerMonitoringAddon := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-rs", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the correct resources have been applied") + pods, err := pod.GetAllByPrefix("omsagent-rs", "kube-system") + Expect(err).NotTo(HaveOccurred()) + for i, c := range 
clusterContainerMonitoringAddon.Containers { + err := pods[0].Spec.Containers[i].ValidateResources(c) + Expect(err).NotTo(HaveOccurred()) + } + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should be successfully running kubepodinventory plugin - ContainerMonitoring", func() { + if hasContainerMonitoring, _ := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-rs", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the kubepodinventory plugin is writing data successfully") + pods, err := pod.GetAllByPrefix("omsagent-rs", "kube-system") + Expect(err).NotTo(HaveOccurred()) + _, err = pods[0].Exec("grep", "\"in_kube_podinventory::emit-stream : Success\"", "/var/opt/microsoft/omsagent/log/omsagent.log") + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should be successfully running kubenodeinventory plugin - ContainerMonitoring", func() { + if hasContainerMonitoring, _ := eng.HasAddon("container-monitoring"); hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-rs", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the kubenodeinventory plugin is writing data successfully") + pods, err := pod.GetAllByPrefix("omsagent-rs", "kube-system") + Expect(err).NotTo(HaveOccurred()) + _, err = pods[0].Exec("grep", "\"in_kube_nodeinventory::emit-stream : Success\"", "/var/opt/microsoft/omsagent/log/omsagent.log") + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + + It("should be successfully running cadvisor_perf plugin - ContainerMonitoring", func() { + if hasContainerMonitoring, _ := eng.HasAddon("container-monitoring"); 
hasContainerMonitoring { + running, err := pod.WaitOnReady("omsagent-", "kube-system", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + By("Ensuring that the cadvisor_perf plugin is writing data successfully") + pods, err := pod.GetAllByPrefix("omsagent-", "kube-system") + Expect(err).NotTo(HaveOccurred()) + _, err = pods[0].Exec("grep", "\"in_cadvisor_perf::emit-stream : Success\"", "/var/opt/microsoft/omsagent/log/omsagent.log") + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("container monitoring disabled for this cluster, will not test") + } + }) + It("should have rescheduler running", func() { if hasRescheduler, reschedulerAddon := eng.HasAddon("rescheduler"); hasRescheduler { running, err := pod.WaitOnReady("rescheduler", "kube-system", 3, 30*time.Second, cfg.Timeout) @@ -482,15 +555,9 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu Expect(err).NotTo(HaveOccurred()) Expect(len(loadTestPods)).To(Equal(numLoadTestPods)) - By("Waiting 3 minutes for load to take effect") - // Wait 3 minutes for autoscaler to respond to load - time.Sleep(3 * time.Minute) - By("Ensuring we have more than 1 apache-php pods due to hpa enforcement") - phpPods, err = phpApacheDeploy.Pods() + _, err = phpApacheDeploy.WaitForReplicas(2, 5*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) - // We should have > 1 pods after autoscale effects - Expect(len(phpPods) > 1).To(BeTrue()) By("Cleaning up after ourselves") err = loadTestDeploy.Delete() @@ -585,9 +652,29 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu }) }) + Describe("after the cluster has been up for awhile", func() { + It("dns-liveness pod should not have any restarts", func() { + if !eng.HasWindowsAgents() && !eng.HasNetworkPolicy("calico") { + pod, err := pod.Get("dns-liveness", "default") + Expect(err).NotTo(HaveOccurred()) + running, err := pod.WaitOnReady(5*time.Second, 3*time.Minute) 
+ Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) + restarts := pod.Status.ContainerStatuses[0].RestartCount + if cfg.SoakClusterName == "" { + err = pod.Delete() + Expect(err).NotTo(HaveOccurred()) + Expect(restarts).To(Equal(0)) + } else { + log.Printf("%d DNS livenessProbe restarts since this cluster was created...\n", restarts) + } + } + }) + }) + Describe("with calico network policy enabled", func() { It("should apply a network policy and deny outbound internet access to nginx pod", func() { - if eng.HasNetworkPolicy("calico") { + if eng.HasNetworkPolicy("calico") || eng.HasNetworkPolicy("azure") { namespace := "default" By("Creating a nginx deployment") r := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -635,12 +722,11 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu }) Describe("with a windows agent pool", func() { - // TODO stabilize this test - /*It("should be able to deploy an iis webserver", func() { + It("should be able to deploy an iis webserver", func() { if eng.HasWindowsAgents() { r := rand.New(rand.NewSource(time.Now().UnixNano())) deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999)) - iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1709", deploymentName, "default", 80, -1) + iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1803", deploymentName, "default", 80, -1) Expect(err).NotTo(HaveOccurred()) running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout) @@ -672,46 +758,63 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } else { Skip("No windows agent was provisioned for this Cluster Definition") } - })*/ + }) - // TODO stabilize this test - /*It("should be able to reach hostport in an iis webserver", func() { + It("Should not have any unready or crashing pods right after deployment", func() { if eng.HasWindowsAgents() { - r := 
rand.New(rand.NewSource(time.Now().UnixNano())) - hostport := 8123 - deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999)) - iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1709", deploymentName, "default", 80, hostport) - Expect(err).NotTo(HaveOccurred()) + By("Checking ready status of each pod in kube-system") + pods, err := pod.GetAll("kube-system") + Expect(err).NotTo(HaveOccurred()) + Expect(len(pods.Pods)).ToNot(BeZero()) + for _, currentPod := range pods.Pods { + log.Printf("Checking %s", currentPod.Metadata.Name) + Expect(currentPod.Status.ContainerStatuses[0].Ready).To(BeTrue()) + Expect(currentPod.Status.ContainerStatuses[0].RestartCount).To(BeNumerically("<", 3)) + } + } + }) - running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout) - Expect(err).NotTo(HaveOccurred()) - Expect(running).To(Equal(true)) + // Windows Bug 16598869 + /* + It("should be able to reach hostport in an iis webserver", func() { + if eng.HasWindowsAgents() { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + hostport := 8123 + deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999)) + iisDeploy, err := deployment.CreateWindowsDeploy("microsoft/iis:windowsservercore-1803", deploymentName, "default", 80, hostport) + Expect(err).NotTo(HaveOccurred()) - iisPods, err := iisDeploy.Pods() - Expect(err).NotTo(HaveOccurred()) - Expect(len(iisPods)).ToNot(BeZero()) + running, err := pod.WaitOnReady(deploymentName, "default", 3, 30*time.Second, cfg.Timeout) + Expect(err).NotTo(HaveOccurred()) + Expect(running).To(Equal(true)) - kubeConfig, err := GetConfig() - Expect(err).NotTo(HaveOccurred()) - master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) - sshKeyPath := cfg.GetSSHKeyPath() + iisPods, err := iisDeploy.Pods() + Expect(err).NotTo(HaveOccurred()) + Expect(len(iisPods)).ToNot(BeZero()) - for _, iisPod := range iisPods { - valid := iisPod.ValidateHostPort("(IIS Windows Server)", 
10, 10*time.Second, master, sshKeyPath) - Expect(valid).To(BeTrue()) - } + kubeConfig, err := GetConfig() + Expect(err).NotTo(HaveOccurred()) + master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName()) + sshKeyPath := cfg.GetSSHKeyPath() - err = iisDeploy.Delete() - Expect(err).NotTo(HaveOccurred()) - } else { - Skip("No windows agent was provisioned for this Cluster Definition") - } - })*/ + for _, iisPod := range iisPods { + valid := iisPod.ValidateHostPort("(IIS Windows Server)", 10, 10*time.Second, master, sshKeyPath) + Expect(valid).To(BeTrue()) + } - // TODO stabilize this test - /*It("should be able to attach azure file", func() { + err = iisDeploy.Delete() + Expect(err).NotTo(HaveOccurred()) + } else { + Skip("No windows agent was provisioned for this Cluster Definition") + } + })*/ + + It("should be able to attach azure file", func() { if eng.HasWindowsAgents() { - if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion ,"1.8") { + if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion, "1.11") { + // Failure in 1.11+ - https://github.com/kubernetes/kubernetes/issues/65845 + Skip("Kubernetes 1.11 has a known issue creating Azure PersistentVolumeClaims") + } else if common.IsKubernetesVersionGe(eng.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion, "1.8") { storageclassName := "azurefile" // should be the same as in storageclass-azurefile.yaml sc, err := storageclass.CreateStorageClassFromFile(filepath.Join(WorkloadDir, "storageclass-azurefile.yaml"), storageclassName) Expect(err).NotTo(HaveOccurred()) @@ -745,6 +848,6 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } else { Skip("No windows agent was provisioned for this Cluster Definition") } - })*/ + }) }) }) diff --git a/test/e2e/kubernetes/node/node.go 
b/test/e2e/kubernetes/node/node.go index 8c7d649544..f57215a3f4 100644 --- a/test/e2e/kubernetes/node/node.go +++ b/test/e2e/kubernetes/node/node.go @@ -15,7 +15,7 @@ import ( const ( //ServerVersion is used to parse out the version of the API running - ServerVersion = `(Server Version:\s)+(v\d+.\d+.\d+)+` + ServerVersion = `(Server Version:\s)+(.*)` ) // Node represents the kubernetes Node Resource diff --git a/test/e2e/kubernetes/pod/pod.go b/test/e2e/kubernetes/pod/pod.go index 6713b4d05e..924c265cea 100644 --- a/test/e2e/kubernetes/pod/pod.go +++ b/test/e2e/kubernetes/pod/pod.go @@ -52,6 +52,16 @@ type Container struct { Resources Resources `json:"resources"` } +// ContainerStatus has status of a container +type ContainerStatus struct { + ContainerID string `json:"containerID"` + Image string `json:"image"` + ImageID string `json:"imageID"` + Name string `json:"name"` + Ready bool `json:"ready"` + RestartCount int `json:"restartCount"` +} + // EnvVar holds environment variables type EnvVar struct { Name string `json:"name"` @@ -84,10 +94,11 @@ type Limits struct { // Status holds information like hostIP and phase type Status struct { - HostIP string `json:"hostIP"` - Phase string `json:"phase"` - PodIP string `json:"podIP"` - StartTime time.Time `json:"startTime"` + HostIP string `json:"hostIP"` + Phase string `json:"phase"` + PodIP string `json:"podIP"` + StartTime time.Time `json:"startTime"` + ContainerStatuses []ContainerStatus `json:"containerStatuses"` } // CreatePodFromFile will create a Pod from file with a name @@ -170,7 +181,7 @@ func AreAllPodsRunning(podPrefix, namespace string) (bool, error) { var status []bool for _, pod := range pl.Pods { - matched, err := regexp.MatchString(podPrefix+"-.*", pod.Metadata.Name) + matched, err := regexp.MatchString(podPrefix, pod.Metadata.Name) if err != nil { log.Printf("Error trying to match pod name:%s\n", err) return false, err diff --git a/test/e2e/kubernetes/util/util.go b/test/e2e/kubernetes/util/util.go 
index 385422c299..2fe159685a 100644 --- a/test/e2e/kubernetes/util/util.go +++ b/test/e2e/kubernetes/util/util.go @@ -2,11 +2,24 @@ package util import ( "fmt" + "log" "os/exec" "strings" + "time" ) // PrintCommand prints a command string func PrintCommand(cmd *exec.Cmd) { fmt.Printf("\n$ %s\n", strings.Join(cmd.Args, " ")) } + +// RunAndLogCommand logs the command with a timestamp when it's run, and the duration at end +func RunAndLogCommand(cmd *exec.Cmd) ([]byte, error) { + cmdLine := fmt.Sprintf("$ %s", strings.Join(cmd.Args, " ")) + start := time.Now() + log.Printf("%s", cmdLine) + out, err := cmd.CombinedOutput() + end := time.Now() + log.Printf("#### %s completed in %s", cmdLine, end.Sub(start).String()) + return out, err +} diff --git a/test/e2e/kubernetes/workloads/dns-liveness.yaml b/test/e2e/kubernetes/workloads/dns-liveness.yaml new file mode 100644 index 0000000000..7cf9345d63 --- /dev/null +++ b/test/e2e/kubernetes/workloads/dns-liveness.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness + name: dns-liveness +spec: + containers: + - name: dns-liveness + image: k8s.gcr.io/busybox + args: + - /bin/sh + - -c + - while true; do sleep 600; done + livenessProbe: + exec: + command: + - nslookup + - bbc.co.uk + initialDelaySeconds: 5 + periodSeconds: 5 \ No newline at end of file diff --git a/test/e2e/runner/cli_provisioner.go b/test/e2e/runner/cli_provisioner.go index c6c01cb244..bd8c844761 100644 --- a/test/e2e/runner/cli_provisioner.go +++ b/test/e2e/runner/cli_provisioner.go @@ -250,6 +250,13 @@ func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Con masterFiles := agentFiles masterFiles = append(masterFiles, "/opt/azure/containers/mountetcd.sh", "/opt/azure/containers/setup-etcd.sh", "/opt/azure/containers/setup-etcd.log") hostname := fmt.Sprintf("%s.%s.cloudapp.azure.com", cli.Config.Name, cli.Config.Location) + cmd := exec.Command("ssh-agent", "-s") + out, err := cmd.CombinedOutput() + if err 
!= nil { + return fmt.Errorf("Error while trying to start ssh agent:%s\nOutput:%s", err, out) + } + authSock := strings.Split(strings.Split(string(out), "=")[1], ";") + os.Setenv("SSH_AUTH_SOCK", authSock[0]) conn, err := remote.NewConnection(hostname, "22", cli.Engine.ClusterDefinition.Properties.LinuxProfile.AdminUsername, cli.Config.GetSSHKeyPath()) if err != nil { return err @@ -273,9 +280,9 @@ func (cli *CLIProvisioner) FetchProvisioningMetrics(path string, cfg *config.Con } connectString := fmt.Sprintf("%s@%s:/tmp/k8s-*", conn.User, hostname) logsPath := filepath.Join(cfg.CurrentWorkingDir, "_logs", hostname) - cmd := exec.Command("scp", "-i", conn.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, logsPath) + cmd = exec.Command("scp", "-i", conn.PrivateKeyPath, "-o", "ConnectTimeout=30", "-o", "StrictHostKeyChecking=no", connectString, logsPath) util.PrintCommand(cmd) - out, err := cmd.CombinedOutput() + out, err = cmd.CombinedOutput() if err != nil { log.Printf("Error output:%s\n", out) return err diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 0000000000..588ceca183 --- /dev/null +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.4.3 + - 1.5.4 + - 1.6.2 + - 1.7.1 + - tip + +script: + - go test -v ./... 
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000000..835ba3e755 --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 0000000000..273db3c98a --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. + +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. 
+```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 0000000000..a932eade02 --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... 
+ +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/bench_test.go b/vendor/github.com/pkg/errors/bench_test.go new file mode 100644 index 0000000000..0416a3cbb8 --- /dev/null +++ b/vendor/github.com/pkg/errors/bench_test.go @@ -0,0 +1,59 @@ +// +build go1.7 + +package errors + +import ( + "fmt" + "testing" + + stderrors "errors" +) + +func noErrors(at, depth int) error { + if at >= depth { + return stderrors.New("no error") + } + return noErrors(at+1, depth) +} +func yesErrors(at, depth int) error { + if at >= depth { + return New("ye error") + } + return yesErrors(at+1, depth) +} + +func BenchmarkErrors(b *testing.B) { + var toperr error + type run struct { + stack int + std bool + } + runs := []run{ + {10, false}, + {10, true}, + {100, false}, + {100, true}, + {1000, false}, + {1000, true}, + } + for _, r := range runs { + part := "pkg/errors" + if r.std { + part = "errors" + } + name := fmt.Sprintf("%s-stack-%d", part, r.stack) + b.Run(name, func(b *testing.B) { + var err error + f := yesErrors + if r.std { + f = noErrors + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err = f(0, r.stack) + } + b.StopTimer() + toperr = err + }) + } +} diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000000..842ee80456 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. 
+// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. 
This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. 
+func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. 
+// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/errors_test.go b/vendor/github.com/pkg/errors/errors_test.go new file mode 100644 index 0000000000..1d8c635586 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors_test.go @@ -0,0 +1,226 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "reflect" + "testing" +) + +func TestNew(t *testing.T) { + tests := []struct { + err string + want error + }{ + {"", fmt.Errorf("")}, + {"foo", fmt.Errorf("foo")}, + {"foo", New("foo")}, + {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, + } + + for _, tt := range tests { + got := New(tt.err) + if got.Error() != tt.want.Error() { + t.Errorf("New.Error(): got: %q, want %q", got, tt.want) + } + } +} + +func TestWrapNil(t *testing.T) { + got := Wrap(nil, "no error") + if got != nil { + t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrap(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := Wrap(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +type nilError struct{} + +func (nilError) Error() string { return "nil error" } + +func TestCause(t *testing.T) { + x := New("error") + tests := []struct { + err error + want 
error + }{{ + // nil error is nil + err: nil, + want: nil, + }, { + // explicit nil error is nil + err: (error)(nil), + want: nil, + }, { + // typed nil is nil + err: (*nilError)(nil), + want: (*nilError)(nil), + }, { + // uncaused error is unaffected + err: io.EOF, + want: io.EOF, + }, { + // caused error returns cause + err: Wrap(io.EOF, "ignored"), + want: io.EOF, + }, { + err: x, // return from errors.New + want: x, + }, { + WithMessage(nil, "whoops"), + nil, + }, { + WithMessage(io.EOF, "whoops"), + io.EOF, + }, { + WithStack(nil), + nil, + }, { + WithStack(io.EOF), + io.EOF, + }} + + for i, tt := range tests { + got := Cause(tt.err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) + } + } +} + +func TestWrapfNil(t *testing.T) { + got := Wrapf(nil, "no error") + if got != nil { + t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrapf(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, + {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, + } + + for _, tt := range tests { + got := Wrapf(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +func TestErrorf(t *testing.T) { + tests := []struct { + err error + want string + }{ + {Errorf("read error without format specifiers"), "read error without format specifiers"}, + {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, + } + + for _, tt := range tests { + got := tt.err.Error() + if got != tt.want { + t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) + } + } +} + +func TestWithStackNil(t *testing.T) { + 
got := WithStack(nil) + if got != nil { + t.Errorf("WithStack(nil): got %#v, expected nil", got) + } +} + +func TestWithStack(t *testing.T) { + tests := []struct { + err error + want string + }{ + {io.EOF, "EOF"}, + {WithStack(io.EOF), "EOF"}, + } + + for _, tt := range tests { + got := WithStack(tt.err).Error() + if got != tt.want { + t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) + } + } +} + +func TestWithMessageNil(t *testing.T) { + got := WithMessage(nil, "no error") + if got != nil { + t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWithMessage(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := WithMessage(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) + } + } + +} + +// errors.New, etc values are not expected to be compared by value +// but the change in errors#27 made them incomparable. Assert that +// various kinds of errors have a functional equality operator, even +// if the result of that equality is always false. 
+func TestErrorEquality(t *testing.T) { + vals := []error{ + nil, + io.EOF, + errors.New("EOF"), + New("EOF"), + Errorf("EOF"), + Wrap(io.EOF, "EOF"), + Wrapf(io.EOF, "EOF%d", 2), + WithMessage(nil, "whoops"), + WithMessage(io.EOF, "whoops"), + WithStack(io.EOF), + WithStack(nil), + } + + for i := range vals { + for j := range vals { + _ = vals[i] == vals[j] // mustn't panic + } + } +} diff --git a/vendor/github.com/pkg/errors/example_test.go b/vendor/github.com/pkg/errors/example_test.go new file mode 100644 index 0000000000..c1fc13e384 --- /dev/null +++ b/vendor/github.com/pkg/errors/example_test.go @@ -0,0 +1,205 @@ +package errors_test + +import ( + "fmt" + + "github.com/pkg/errors" +) + +func ExampleNew() { + err := errors.New("whoops") + fmt.Println(err) + + // Output: whoops +} + +func ExampleNew_printf() { + err := errors.New("whoops") + fmt.Printf("%+v", err) + + // Example output: + // whoops + // github.com/pkg/errors_test.ExampleNew_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:17 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func ExampleWithMessage() { + cause := errors.New("whoops") + err := errors.WithMessage(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func ExampleWithStack() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Println(err) + + // Output: whoops +} + +func ExampleWithStack_printf() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Printf("%+v", err) + + // Example Output: + // whoops + // 
github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 +} + +func ExampleWrap() { + cause := errors.New("whoops") + err := errors.Wrap(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func fn() error { + e1 := errors.New("error") + e2 := errors.Wrap(e1, "inner") + e3 := errors.Wrap(e2, "middle") + return errors.Wrap(e3, "outer") +} + +func ExampleCause() { + err := fn() + fmt.Println(err) + fmt.Println(errors.Cause(err)) + + // Output: outer: middle: inner: error + // error +} + +func ExampleWrap_extended() { + err := fn() + fmt.Printf("%+v\n", err) + + // Example output: + // error + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.ExampleCause_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:63 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // 
/home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:104 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer +} + +func ExampleWrapf() { + cause := errors.New("whoops") + err := errors.Wrapf(cause, "oh noes #%d", 2) + fmt.Println(err) + + // Output: oh noes #2: whoops +} + +func ExampleErrorf_extended() { + err := errors.Errorf("whoops: %s", "foo") + fmt.Printf("%+v", err) + + // Example output: + // whoops: foo + // github.com/pkg/errors_test.ExampleErrorf + // /home/dfc/src/github.com/pkg/errors/example_test.go:101 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:102 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func Example_stackTrace() { + type stackTracer interface { + StackTrace() errors.StackTrace + } + + err, ok := errors.Cause(fn()).(stackTracer) + if !ok { + panic("oops, err does not implement stackTracer") + } + + st := err.StackTrace() + fmt.Printf("%+v", st[0:2]) // top two frames + + // Example output: + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.Example_stackTrace + // 
/home/dfc/src/github.com/pkg/errors/example_test.go:127 +} + +func ExampleCause_printf() { + err := errors.Wrap(func() error { + return func() error { + return errors.Errorf("hello %s", fmt.Sprintf("world")) + }() + }(), "failed") + + fmt.Printf("%v", err) + + // Output: failed: hello world +} diff --git a/vendor/github.com/pkg/errors/format_test.go b/vendor/github.com/pkg/errors/format_test.go new file mode 100644 index 0000000000..15fd7d89d7 --- /dev/null +++ b/vendor/github.com/pkg/errors/format_test.go @@ -0,0 +1,535 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "regexp" + "strings" + "testing" +) + +func TestFormatNew(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + New("error"), + "%s", + "error", + }, { + New("error"), + "%v", + "error", + }, { + New("error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatNew\n" + + "\t.+/github.com/pkg/errors/format_test.go:26", + }, { + New("error"), + "%q", + `"error"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatErrorf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Errorf("%s", "error"), + "%s", + "error", + }, { + Errorf("%s", "error"), + "%v", + "error", + }, { + Errorf("%s", "error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatErrorf\n" + + "\t.+/github.com/pkg/errors/format_test.go:56", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrap(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrap(New("error"), "error2"), + "%s", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%v", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + 
"\t.+/github.com/pkg/errors/format_test.go:82", + }, { + Wrap(io.EOF, "error"), + "%s", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%v", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%+v", + "EOF\n" + + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:96", + }, { + Wrap(Wrap(io.EOF, "error1"), "error2"), + "%+v", + "EOF\n" + + "error1\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:103\n", + }, { + Wrap(New("error with space"), "context"), + "%q", + `"context: error with space"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrapf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrapf(io.EOF, "error%d", 2), + "%s", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%v", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%+v", + "EOF\n" + + "error2\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:134", + }, { + Wrapf(New("error"), "error%d", 2), + "%s", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%v", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:149", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWithStack(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithStack(io.EOF), + "%s", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%v", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:175"}, + }, { + WithStack(New("error")), + "%s", + 
[]string{"error"}, + }, { + WithStack(New("error")), + "%v", + []string{"error"}, + }, { + WithStack(New("error")), + "%+v", + []string{"error", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189"}, + }, { + WithStack(WithStack(io.EOF)), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197"}, + }, { + WithStack(WithStack(Wrapf(io.EOF, "message"))), + "%+v", + []string{"EOF", + "message", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205"}, + }, { + WithStack(Errorf("error%d", 1)), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatWithMessage(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithMessage(New("error"), "error2"), + "%s", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%v", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%+v", + []string{ + "error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + 
"\t.+/github.com/pkg/errors/format_test.go:244", + "error2"}, + }, { + WithMessage(io.EOF, "addition1"), + "%s", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%v", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%+v", + []string{"EOF", "addition1"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%v", + []string{"addition2: addition1: EOF"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%+v", + []string{"EOF", "addition1", "addition2"}, + }, { + Wrap(WithMessage(io.EOF, "error1"), "error2"), + "%+v", + []string{"EOF", "error1", "error2", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:272"}, + }, { + WithMessage(Errorf("error%d", 1), "error2"), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:278", + "error2"}, + }, { + WithMessage(WithStack(io.EOF), "error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:285", + "error"}, + }, { + WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "inside-error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "outside-error"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatGeneric(t *testing.T) { + starts := []struct { + err error + want []string + }{ + {New("new-error"), []string{ + "new-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:315"}, + }, {Errorf("errorf-error"), 
[]string{ + "errorf-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:319"}, + }, {errors.New("errors-new-error"), []string{ + "errors-new-error"}, + }, + } + + wrappers := []wrapper{ + { + func(err error) error { return WithMessage(err, "with-message") }, + []string{"with-message"}, + }, { + func(err error) error { return WithStack(err) }, + []string{ + "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + + ".+/github.com/pkg/errors/format_test.go:333", + }, + }, { + func(err error) error { return Wrap(err, "wrap-error") }, + []string{ + "wrap-error", + "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + + ".+/github.com/pkg/errors/format_test.go:339", + }, + }, { + func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, + []string{ + "wrapf-error1", + "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + + ".+/github.com/pkg/errors/format_test.go:346", + }, + }, + } + + for s := range starts { + err := starts[s].err + want := starts[s].want + testFormatCompleteCompare(t, s, err, "%+v", want, false) + testGenericRecursive(t, err, want, wrappers, 3) + } +} + +func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) { + got := fmt.Sprintf(format, arg) + gotLines := strings.SplitN(got, "\n", -1) + wantLines := strings.SplitN(want, "\n", -1) + + if len(wantLines) > len(gotLines) { + t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want) + return + } + + for i, w := range wantLines { + match, err := regexp.MatchString(w, gotLines[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want) + } + } +} + +var stackLineR = regexp.MustCompile(`\.`) + +// parseBlocks parses input into a slice, where: +// - incase entry contains a 
newline, its a stacktrace +// - incase entry contains no newline, its a solo line. +// +// Detecting stack boundaries only works incase the WithStack-calls are +// to be found on the same line, thats why it is optionally here. +// +// Example use: +// +// for _, e := range blocks { +// if strings.ContainsAny(e, "\n") { +// // Match as stack +// } else { +// // Match as line +// } +// } +// +func parseBlocks(input string, detectStackboundaries bool) ([]string, error) { + var blocks []string + + stack := "" + wasStack := false + lines := map[string]bool{} // already found lines + + for _, l := range strings.Split(input, "\n") { + isStackLine := stackLineR.MatchString(l) + + switch { + case !isStackLine && wasStack: + blocks = append(blocks, stack, l) + stack = "" + lines = map[string]bool{} + case isStackLine: + if wasStack { + // Detecting two stacks after another, possible cause lines match in + // our tests due to WithStack(WithStack(io.EOF)) on same line. + if detectStackboundaries { + if lines[l] { + if len(stack) == 0 { + return nil, errors.New("len of block must not be zero here") + } + + blocks = append(blocks, stack) + stack = l + lines = map[string]bool{l: true} + continue + } + } + + stack = stack + "\n" + l + } else { + stack = l + } + lines[l] = true + case !isStackLine && !wasStack: + blocks = append(blocks, l) + default: + return nil, errors.New("must not happen") + } + + wasStack = isStackLine + } + + // Use up stack + if stack != "" { + blocks = append(blocks, stack) + } + return blocks, nil +} + +func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { + gotStr := fmt.Sprintf(format, arg) + + got, err := parseBlocks(gotStr, detectStackBoundaries) + if err != nil { + t.Fatal(err) + } + + if len(got) != len(want) { + t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", + n+1, format, len(got), len(want), 
prettyBlocks(got), prettyBlocks(want), gotStr) + } + + for i := range got { + if strings.ContainsAny(want[i], "\n") { + // Match as stack + match, err := regexp.MatchString(want[i], got[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", + n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) + } + } else { + // Match as message + if got[i] != want[i] { + t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) + } + } + } +} + +type wrapper struct { + wrap func(err error) error + want []string +} + +func prettyBlocks(blocks []string, prefix ...string) string { + var out []string + + for _, b := range blocks { + out = append(out, fmt.Sprintf("%v", b)) + } + + return " " + strings.Join(out, "\n ") +} + +func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { + if len(beforeWant) == 0 { + panic("beforeWant must not be empty") + } + for _, w := range list { + if len(w.want) == 0 { + panic("want must not be empty") + } + + err := w.wrap(beforeErr) + + // Copy required cause append(beforeWant, ..) modified beforeWant subtly. + beforeCopy := make([]string, len(beforeWant)) + copy(beforeCopy, beforeWant) + + beforeWant := beforeCopy + last := len(beforeWant) - 1 + var want []string + + // Merge two stacks behind each other. + if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { + want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) + } else { + want = append(beforeWant, w.want...) 
+ } + + testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) + if maxDepth > 0 { + testGenericRecursive(t, err, want, list, maxDepth-1) + } + } +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000000..6b1f2891a5 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. 
+// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). 
+func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. 
+ const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/vendor/github.com/pkg/errors/stack_test.go b/vendor/github.com/pkg/errors/stack_test.go new file mode 100644 index 0000000000..510c27a9f9 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack_test.go @@ -0,0 +1,292 @@ +package errors + +import ( + "fmt" + "runtime" + "testing" +) + +var initpc, _, _, _ = runtime.Caller(0) + +func TestFrameLine(t *testing.T) { + var tests = []struct { + Frame + want int + }{{ + Frame(initpc), + 9, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) + }(), + 20, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(1) + return Frame(pc) + }(), + 28, + }, { + Frame(0), // invalid PC + 0, + }} + + for _, tt := range tests { + got := tt.Frame.line() + want := tt.want + if want != got { + t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got) + } + } +} + +type X struct{} + +func (x X) val() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func (x *X) ptr() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func TestFrameFormat(t *testing.T) { + var tests = []struct { + Frame + format string + want string + }{{ + Frame(initpc), + "%s", + "stack_test.go", + }, { + Frame(initpc), + "%+s", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go", + }, { + Frame(0), + "%s", + "unknown", + }, { + Frame(0), + "%+s", + "unknown", + }, { + Frame(initpc), + "%d", + "9", + }, { + Frame(0), + "%d", + "0", + }, { + Frame(initpc), + "%n", + "init", + }, { + func() Frame { + var x X + return x.ptr() + }(), 
+ "%n", + `\(\*X\).ptr`, + }, { + func() Frame { + var x X + return x.val() + }(), + "%n", + "X.val", + }, { + Frame(0), + "%n", + "", + }, { + Frame(initpc), + "%v", + "stack_test.go:9", + }, { + Frame(initpc), + "%+v", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go:9", + }, { + Frame(0), + "%v", + "unknown:0", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) + } +} + +func TestFuncname(t *testing.T) { + tests := []struct { + name, want string + }{ + {"", ""}, + {"runtime.main", "main"}, + {"github.com/pkg/errors.funcname", "funcname"}, + {"funcname", "funcname"}, + {"io.copyBuffer", "copyBuffer"}, + {"main.(*R).Write", "(*R).Write"}, + } + + for _, tt := range tests { + got := funcname(tt.name) + want := tt.want + if got != want { + t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) + } + } +} + +func TestTrimGOPATH(t *testing.T) { + var tests = []struct { + Frame + want string + }{{ + Frame(initpc), + "github.com/pkg/errors/stack_test.go", + }} + + for i, tt := range tests { + pc := tt.Frame.pc() + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + got := trimGOPATH(fn.Name(), file) + testFormatRegexp(t, i, got, "%s", tt.want) + } +} + +func TestStackTrace(t *testing.T) { + tests := []struct { + err error + want []string + }{{ + New("ooh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:172", + }, + }, { + Wrap(New("ooh"), "ahh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New + }, + }, { + Cause(Wrap(New("ooh"), "ahh")), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New + }, + }, { + func() error { return New("ooh") }(), []string{ + 
`github.com/pkg/errors.(func·009|TestStackTrace.func1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller + }, + }, { + Cause(func() error { + return func() error { + return Errorf("hello %s", fmt.Sprintf("world")) + }() + }()), []string{ + `github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf + `github.com/pkg/errors.(func·011|TestStackTrace.func2)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller + }, + }} + for i, tt := range tests { + x, ok := tt.err.(interface { + StackTrace() StackTrace + }) + if !ok { + t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) + continue + } + st := x.StackTrace() + for j, want := range tt.want { + testFormatRegexp(t, i, st[j], "%+v", want) + } + } +} + +func stackTrace() StackTrace { + const depth = 8 + var pcs [depth]uintptr + n := runtime.Callers(1, pcs[:]) + var st stack = pcs[0:n] + return st.StackTrace() +} + +func TestStackTraceFormat(t *testing.T) { + tests := []struct { + StackTrace + format string + want string + }{{ + nil, + "%s", + `\[\]`, + }, { + nil, + "%v", + `\[\]`, + }, { + nil, + "%+v", + "", + }, { + nil, + "%#v", + `\[\]errors.Frame\(nil\)`, + }, { + make(StackTrace, 0), + "%s", + `\[\]`, + }, { + make(StackTrace, 0), + "%v", + `\[\]`, + }, { + make(StackTrace, 0), + "%+v", + "", + }, { + make(StackTrace, 0), + "%#v", + `\[\]errors.Frame{}`, + }, { + stackTrace()[:2], + "%s", + `\[stack_test.go stack_test.go\]`, + }, { + stackTrace()[:2], + "%v", + `\[stack_test.go:225 
stack_test.go:272\]`, + }, { + stackTrace()[:2], + "%+v", + "\n" + + "github.com/pkg/errors.stackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:225\n" + + "github.com/pkg/errors.TestStackTraceFormat\n" + + "\t.+/github.com/pkg/errors/stack_test.go:276", + }, { + stackTrace()[:2], + "%#v", + `\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) + } +} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTING.md b/vendor/golang.org/x/sync/CONTRIBUTING.md new file mode 100644 index 0000000000..d0485e887a --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. 
+ +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md new file mode 100644 index 0000000000..1f8436cc9c --- /dev/null +++ b/vendor/golang.org/x/sync/README.md @@ -0,0 +1,18 @@ +# Go Sync + +This repository provides Go concurrency primitives in addition to the +ones provided by the language and "sync" and "sync/atomic" packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sync`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sync repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sync/codereview.cfg b/vendor/golang.org/x/sync/codereview.cfg new file mode 100644 index 0000000000..3f8b14b64e --- /dev/null +++ b/vendor/golang.org/x/sync/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..533438d91c --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "sync" + + "golang.org/x/net/context" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go b/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go new file mode 100644 index 0000000000..714b5aea77 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errgroup_test + +import ( + "crypto/md5" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +// Pipeline demonstrates the use of a Group to implement a multi-stage +// pipeline: a version of the MD5All function with bounded parallelism from +// https://blog.golang.org/pipelines. +func ExampleGroup_pipeline() { + m, err := MD5All(context.Background(), ".") + if err != nil { + log.Fatal(err) + } + + for k, sum := range m { + fmt.Printf("%s:\t%x\n", k, sum) + } +} + +type result struct { + path string + sum [md5.Size]byte +} + +// MD5All reads all the files in the file tree rooted at root and returns a map +// from file path to the MD5 sum of the file's contents. If the directory walk +// fails or any read operation fails, MD5All returns an error. +func MD5All(ctx context.Context, root string) (map[string][md5.Size]byte, error) { + // ctx is canceled when g.Wait() returns. When this version of MD5All returns + // - even in case of error! - we know that all of the goroutines have finished + // and the memory they were using can be garbage-collected. + g, ctx := errgroup.WithContext(ctx) + paths := make(chan string) + + g.Go(func() error { + defer close(paths) + return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return nil + } + select { + case paths <- path: + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }) + + // Start a fixed number of goroutines to read and digest files. 
+ c := make(chan result) + const numDigesters = 20 + for i := 0; i < numDigesters; i++ { + g.Go(func() error { + for path := range paths { + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + select { + case c <- result{path, md5.Sum(data)}: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + }) + } + go func() { + g.Wait() + close(c) + }() + + m := make(map[string][md5.Size]byte) + for r := range c { + m[r.path] = r.sum + } + // Check whether any of the goroutines failed. Since g is accumulating the + // errors, we don't need to send them (or check for them) in the individual + // results sent on the channel. + if err := g.Wait(); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup_test.go b/vendor/golang.org/x/sync/errgroup/errgroup_test.go new file mode 100644 index 0000000000..6a9696efc6 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup_test.go @@ -0,0 +1,176 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errgroup_test + +import ( + "errors" + "fmt" + "net/http" + "os" + "testing" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +var ( + Web = fakeSearch("web") + Image = fakeSearch("image") + Video = fakeSearch("video") +) + +type Result string +type Search func(ctx context.Context, query string) (Result, error) + +func fakeSearch(kind string) Search { + return func(_ context.Context, query string) (Result, error) { + return Result(fmt.Sprintf("%s result for %q", kind, query)), nil + } +} + +// JustErrors illustrates the use of a Group in place of a sync.WaitGroup to +// simplify goroutine counting and error handling. This example is derived from +// the sync.WaitGroup example at https://golang.org/pkg/sync/#example_WaitGroup. 
+func ExampleGroup_justErrors() { + var g errgroup.Group + var urls = []string{ + "http://www.golang.org/", + "http://www.google.com/", + "http://www.somestupidname.com/", + } + for _, url := range urls { + // Launch a goroutine to fetch the URL. + url := url // https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + // Fetch the URL. + resp, err := http.Get(url) + if err == nil { + resp.Body.Close() + } + return err + }) + } + // Wait for all HTTP fetches to complete. + if err := g.Wait(); err == nil { + fmt.Println("Successfully fetched all URLs.") + } +} + +// Parallel illustrates the use of a Group for synchronizing a simple parallel +// task: the "Google Search 2.0" function from +// https://talks.golang.org/2012/concurrency.slide#46, augmented with a Context +// and error-handling. +func ExampleGroup_parallel() { + Google := func(ctx context.Context, query string) ([]Result, error) { + g, ctx := errgroup.WithContext(ctx) + + searches := []Search{Web, Image, Video} + results := make([]Result, len(searches)) + for i, search := range searches { + i, search := i, search // https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + result, err := search(ctx, query) + if err == nil { + results[i] = result + } + return err + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + return results, nil + } + + results, err := Google(context.Background(), "golang") + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } + for _, result := range results { + fmt.Println(result) + } + + // Output: + // web result for "golang" + // image result for "golang" + // video result for "golang" +} + +func TestZeroGroup(t *testing.T) { + err1 := errors.New("errgroup_test: 1") + err2 := errors.New("errgroup_test: 2") + + cases := []struct { + errs []error + }{ + {errs: []error{}}, + {errs: []error{nil}}, + {errs: []error{err1}}, + {errs: []error{err1, nil}}, + {errs: []error{err1, nil, err2}}, + } + + for _, tc := range cases { + 
var g errgroup.Group + + var firstErr error + for i, err := range tc.errs { + err := err + g.Go(func() error { return err }) + + if firstErr == nil && err != nil { + firstErr = err + } + + if gErr := g.Wait(); gErr != firstErr { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "g.Wait() = %v; want %v", + g, tc.errs[:i+1], err, firstErr) + } + } + } +} + +func TestWithContext(t *testing.T) { + errDoom := errors.New("group_test: doomed") + + cases := []struct { + errs []error + want error + }{ + {want: nil}, + {errs: []error{nil}, want: nil}, + {errs: []error{errDoom}, want: errDoom}, + {errs: []error{errDoom, nil}, want: errDoom}, + } + + for _, tc := range cases { + g, ctx := errgroup.WithContext(context.Background()) + + for _, err := range tc.errs { + err := err + g.Go(func() error { return err }) + } + + if err := g.Wait(); err != tc.want { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "g.Wait() = %v; want %v", + g, tc.errs, err, tc.want) + } + + canceled := false + select { + case <-ctx.Done(): + canceled = true + default: + } + if !canceled { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "ctx.Done() was not closed", + g, tc.errs) + } + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 0000000000..e9d2d79a97 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "sync" + + // Use the old context because packages that depend on this one + // (e.g. cloud.google.com/go/...) must run on Go 1.6. + // TODO(jba): update to "context" when possible. 
+ "golang.org/x/net/context" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking only until ctx +// is done. On success, returns nil. On failure, returns ctx.Err() and leaves +// the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. 
+func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: bad release") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go new file mode 100644 index 0000000000..1e3ab75f5d --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package semaphore_test + +import ( + "fmt" + "testing" + + "golang.org/x/net/context" + "golang.org/x/sync/semaphore" +) + +// weighted is an interface matching a subset of *Weighted. It allows +// alternate implementations for testing and benchmarking. 
+type weighted interface { + Acquire(context.Context, int64) error + TryAcquire(int64) bool + Release(int64) +} + +// semChan implements Weighted using a channel for +// comparing against the condition variable-based implementation. +type semChan chan struct{} + +func newSemChan(n int64) semChan { + return semChan(make(chan struct{}, n)) +} + +func (s semChan) Acquire(_ context.Context, n int64) error { + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return nil +} + +func (s semChan) TryAcquire(n int64) bool { + if int64(len(s))+n > int64(cap(s)) { + return false + } + + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return true +} + +func (s semChan) Release(n int64) { + for i := int64(0); i < n; i++ { + <-s + } +} + +// acquireN calls Acquire(size) on sem N times and then calls Release(size) N times. +func acquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + sem.Acquire(context.Background(), size) + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +// tryAcquireN calls TryAcquire(size) on sem N times and then calls Release(size) N times. 
+func tryAcquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + if !sem.TryAcquire(size) { + b.Fatalf("TryAcquire(%v) = false, want true", size) + } + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +func BenchmarkNewSeq(b *testing.B) { + for _, cap := range []int64{1, 128} { + b.Run(fmt.Sprintf("Weighted-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = semaphore.NewWeighted(cap) + } + }) + b.Run(fmt.Sprintf("semChan-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = newSemChan(cap) + } + }) + } +} + +func BenchmarkAcquireSeq(b *testing.B) { + for _, c := range []struct { + cap, size int64 + N int + }{ + {1, 1, 1}, + {2, 1, 1}, + {16, 1, 1}, + {128, 1, 1}, + {2, 2, 1}, + {16, 2, 8}, + {128, 2, 64}, + {2, 1, 2}, + {16, 8, 2}, + {128, 64, 2}, + } { + for _, w := range []struct { + name string + w weighted + }{ + {"Weighted", semaphore.NewWeighted(c.cap)}, + {"semChan", newSemChan(c.cap)}, + } { + b.Run(fmt.Sprintf("%s-acquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + acquireN(b, w.w, c.size, c.N) + }) + b.Run(fmt.Sprintf("%s-tryAcquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + tryAcquireN(b, w.w, c.size, c.N) + }) + } + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go new file mode 100644 index 0000000000..e75cd79f5b --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go @@ -0,0 +1,84 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package semaphore_test + +import ( + "context" + "fmt" + "log" + "runtime" + + "golang.org/x/sync/semaphore" +) + +// Example_workerPool demonstrates how to use a semaphore to limit the number of +// goroutines working on parallel tasks. 
+// +// This use of a semaphore mimics a typical “worker pool” pattern, but without +// the need to explicitly shut down idle workers when the work is done. +func Example_workerPool() { + ctx := context.TODO() + + var ( + maxWorkers = runtime.GOMAXPROCS(0) + sem = semaphore.NewWeighted(int64(maxWorkers)) + out = make([]int, 32) + ) + + // Compute the output using up to maxWorkers goroutines at a time. + for i := range out { + // When maxWorkers goroutines are in flight, Acquire blocks until one of the + // workers finishes. + if err := sem.Acquire(ctx, 1); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + break + } + + go func(i int) { + defer sem.Release(1) + out[i] = collatzSteps(i + 1) + }(i) + } + + // Acquire all of the tokens to wait for any remaining workers to finish. + // + // If you are already waiting for the workers by some other means (such as an + // errgroup.Group), you can omit this final Acquire call. + if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + } + + fmt.Println(out) + + // Output: + // [0 1 7 2 5 8 16 3 19 6 14 9 9 17 17 4 12 20 20 7 7 15 15 10 23 10 111 18 18 18 106 5] +} + +// collatzSteps computes the number of steps to reach 1 under the Collatz +// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.) +func collatzSteps(n int) (steps int) { + if n <= 0 { + panic("nonpositive input") + } + + for ; n > 1; steps++ { + if steps < 0 { + panic("too many steps") + } + + if n%2 == 0 { + n /= 2 + continue + } + + const maxInt = int(^uint(0) >> 1) + if n > (maxInt-1)/3 { + panic("overflow") + } + n = 3*n + 1 + } + + return steps +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_test.go new file mode 100644 index 0000000000..2541b9068f --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_test.go @@ -0,0 +1,171 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package semaphore_test + +import ( + "math/rand" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +const maxSleep = 1 * time.Millisecond + +func HammerWeighted(sem *semaphore.Weighted, n int64, loops int) { + for i := 0; i < loops; i++ { + sem.Acquire(context.Background(), n) + time.Sleep(time.Duration(rand.Int63n(int64(maxSleep/time.Nanosecond))) * time.Nanosecond) + sem.Release(n) + } +} + +func TestWeighted(t *testing.T) { + t.Parallel() + + n := runtime.GOMAXPROCS(0) + loops := 10000 / n + sem := semaphore.NewWeighted(int64(n)) + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + i := i + go func() { + defer wg.Done() + HammerWeighted(sem, int64(i), loops) + }() + } + wg.Wait() +} + +func TestWeightedPanic(t *testing.T) { + t.Parallel() + + defer func() { + if recover() == nil { + t.Fatal("release of an unacquired weighted semaphore did not panic") + } + }() + w := semaphore.NewWeighted(1) + w.Release(1) +} + +func TestWeightedTryAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tries := []bool{} + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + tries = append(tries, sem.TryAcquire(1)) + + sem.Release(2) + + tries = append(tries, sem.TryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tryAcquire := func(n int64) bool { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + return sem.Acquire(ctx, n) == nil + } + + tries := []bool{} + 
sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + tries = append(tries, tryAcquire(1)) + + sem.Release(2) + + tries = append(tries, tryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedDoesntBlockIfTooBig(t *testing.T) { + t.Parallel() + + const n = 2 + sem := semaphore.NewWeighted(n) + { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go sem.Acquire(ctx, n+1) + } + + g, ctx := errgroup.WithContext(context.Background()) + for i := n * 3; i > 0; i-- { + g.Go(func() error { + err := sem.Acquire(ctx, 1) + if err == nil { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + } + return err + }) + } + if err := g.Wait(); err != nil { + t.Errorf("semaphore.NewWeighted(%v) failed to AcquireCtx(_, 1) with AcquireCtx(_, %v) pending", n, n+1) + } +} + +// TestLargeAcquireDoesntStarve times out if a large call to Acquire starves. +// Merely returning from the test function indicates success. +func TestLargeAcquireDoesntStarve(t *testing.T) { + t.Parallel() + + ctx := context.Background() + n := int64(runtime.GOMAXPROCS(0)) + sem := semaphore.NewWeighted(n) + running := true + + var wg sync.WaitGroup + wg.Add(int(n)) + for i := n; i > 0; i-- { + sem.Acquire(ctx, 1) + go func() { + defer func() { + sem.Release(1) + wg.Done() + }() + for running { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + sem.Acquire(ctx, 1) + } + }() + } + + sem.Acquire(ctx, n) + running = false + sem.Release(n) + wg.Wait() +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 0000000000..9a4f8d59e0 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,111 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. 
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + delete(g.m, key) + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight_test.go b/vendor/golang.org/x/sync/singleflight/singleflight_test.go new file mode 100644 index 0000000000..5e6f1b328e --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight_test.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package singleflight + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestDo(t *testing.T) { + var g Group + v, err, _ := g.Do("key", func() (interface{}, error) { + return "bar", nil + }) + if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { + t.Errorf("Do = %v; want %v", got, want) + } + if err != nil { + t.Errorf("Do error = %v", err) + } +} + +func TestDoErr(t *testing.T) { + var g Group + someErr := errors.New("Some error") + v, err, _ := g.Do("key", func() (interface{}, error) { + return nil, someErr + }) + if err != someErr { + t.Errorf("Do error = %v; want someErr %v", err, someErr) + } + if v != nil { + t.Errorf("unexpected non-nil value %#v", v) + } +} + +func TestDoDupSuppress(t *testing.T) { + var g Group + var wg1, wg2 sync.WaitGroup + c := make(chan string, 1) + var calls int32 + fn := func() (interface{}, error) { + if atomic.AddInt32(&calls, 1) == 1 { + // First invocation. + wg1.Done() + } + v := <-c + c <- v // pump; make available for any future calls + + time.Sleep(10 * time.Millisecond) // let more goroutines enter Do + + return v, nil + } + + const n = 10 + wg1.Add(1) + for i := 0; i < n; i++ { + wg1.Add(1) + wg2.Add(1) + go func() { + defer wg2.Done() + wg1.Done() + v, err, _ := g.Do("key", fn) + if err != nil { + t.Errorf("Do error: %v", err) + return + } + if s, _ := v.(string); s != "bar" { + t.Errorf("Do = %T %v; want %q", v, v, "bar") + } + }() + } + wg1.Wait() + // At least one goroutine is in fn now and all of them have at + // least reached the line before the Do. 
+ c <- "bar" + wg2.Wait() + if got := atomic.LoadInt32(&calls); got <= 0 || got >= n { + t.Errorf("number of calls = %d; want over 0 and less than %d", got, n) + } +} diff --git a/vendor/golang.org/x/sync/syncmap/map.go b/vendor/golang.org/x/sync/syncmap/map.go new file mode 100644 index 0000000000..80e15847ef --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map.go @@ -0,0 +1,372 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package syncmap provides a concurrent map implementation. +// It is a prototype for a proposed addition to the sync package +// in the standard library. +// (https://golang.org/issue/18177) +package syncmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. +// It is safe for multiple goroutines to call a Map's methods concurrently. +// +// The zero Map is valid and empty. +// +// A Map must not be copied after first use. +type Map struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. 
+ // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[interface{}]*entry + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly struct { + m map[interface{}]*entry + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = unsafe.Pointer(new(interface{})) + +// An entry is a slot in the map corresponding to a particular key. +type entry struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. 
+ p unsafe.Pointer // *interface{} +} + +func newEntry(i interface{}) *entry { + return &entry{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map) Load(key interface{}) (value interface{}, ok bool) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return nil, false + } + return e.load() +} + +func (e *entry) load() (value interface{}, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return nil, false + } + return *(*interface{})(p), true +} + +// Store sets the value for a key. +func (m *Map) Store(key, value interface{}) { + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. 
+ m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entry) tryStore(i *interface{}) bool { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + for { + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expunged, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entry) storeLocked(i *interface{}) { + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. 
+ // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + } +} + +// Delete deletes the value for a key. +func (m *Map) Delete(key interface{}) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entry) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. 
+// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map) Range(f func(key, value interface{}) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnly) + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
+ m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if read.amended { + read = readOnly{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *Map) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnly{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *Map) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnly) + m.dirty = make(map[interface{}]*entry, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entry) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expunged +} diff --git a/vendor/golang.org/x/sync/syncmap/map_bench_test.go b/vendor/golang.org/x/sync/syncmap/map_bench_test.go new file mode 100644 index 0000000000..b279b4f749 --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_bench_test.go @@ -0,0 +1,216 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syncmap_test + +import ( + "fmt" + "reflect" + "sync/atomic" + "testing" + + "golang.org/x/sync/syncmap" +) + +type bench struct { + setup func(*testing.B, mapInterface) + perG func(b *testing.B, pb *testing.PB, i int, m mapInterface) +} + +func benchMap(b *testing.B, bench bench) { + for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &syncmap.Map{}} { + b.Run(fmt.Sprintf("%T", m), func(b *testing.B) { + m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface) + if bench.setup != nil { + bench.setup(b, m) + } + + b.ResetTimer() + + var i int64 + b.RunParallel(func(pb *testing.PB) { + id := int(atomic.AddInt64(&i, 1) - 1) + bench.perG(b, pb, id*b.N, m) + }) + }) + } +} + +func BenchmarkLoadMostlyHits(b *testing.B) { + const hits, misses = 1023, 1 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. + for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i % (hits + misses)) + } + }, + }) +} + +func BenchmarkLoadMostlyMisses(b *testing.B) { + const hits, misses = 1, 1023 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. + for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i % (hits + misses)) + } + }, + }) +} + +func BenchmarkLoadOrStoreBalanced(b *testing.B) { + const hits, misses = 128, 128 + + benchMap(b, bench{ + setup: func(b *testing.B, m mapInterface) { + if _, ok := m.(*DeepCopyMap); ok { + b.Skip("DeepCopyMap has quadratic running time.") + } + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. 
+ for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + j := i % (hits + misses) + if j < hits { + if _, ok := m.LoadOrStore(j, i); !ok { + b.Fatalf("unexpected miss for %v", j) + } + } else { + if v, loaded := m.LoadOrStore(i, i); loaded { + b.Fatalf("failed to store %v: existing value %v", i, v) + } + } + } + }, + }) +} + +func BenchmarkLoadOrStoreUnique(b *testing.B) { + benchMap(b, bench{ + setup: func(b *testing.B, m mapInterface) { + if _, ok := m.(*DeepCopyMap); ok { + b.Skip("DeepCopyMap has quadratic running time.") + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.LoadOrStore(i, i) + } + }, + }) +} + +func BenchmarkLoadOrStoreCollision(b *testing.B) { + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + m.LoadOrStore(0, 0) + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.LoadOrStore(0, 0) + } + }, + }) +} + +func BenchmarkRange(b *testing.B) { + const mapSize = 1 << 10 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < mapSize; i++ { + m.Store(i, i) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Range(func(_, _ interface{}) bool { return true }) + } + }, + }) +} + +// BenchmarkAdversarialAlloc tests performance when we store a new value +// immediately whenever the map is promoted to clean and otherwise load a +// unique, missing key. +// +// This forces the Load calls to always acquire the map's mutex. 
+func BenchmarkAdversarialAlloc(b *testing.B) { + benchMap(b, bench{ + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + var stores, loadsSinceStore int64 + for ; pb.Next(); i++ { + m.Load(i) + if loadsSinceStore++; loadsSinceStore > stores { + m.LoadOrStore(i, stores) + loadsSinceStore = 0 + stores++ + } + } + }, + }) +} + +// BenchmarkAdversarialDelete tests performance when we periodically delete +// one key and add a different one in a large map. +// +// This forces the Load calls to always acquire the map's mutex and periodically +// makes a full copy of the map despite changing only one entry. +func BenchmarkAdversarialDelete(b *testing.B) { + const mapSize = 1 << 10 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < mapSize; i++ { + m.Store(i, i) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i) + + if i%mapSize == 0 { + m.Range(func(k, _ interface{}) bool { + m.Delete(k) + return false + }) + m.Store(i, i) + } + } + }, + }) +} diff --git a/vendor/golang.org/x/sync/syncmap/map_reference_test.go b/vendor/golang.org/x/sync/syncmap/map_reference_test.go new file mode 100644 index 0000000000..923c51b70e --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_reference_test.go @@ -0,0 +1,151 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syncmap_test + +import ( + "sync" + "sync/atomic" +) + +// This file contains reference map implementations for unit-tests. + +// mapInterface is the interface Map implements. 
+type mapInterface interface { + Load(interface{}) (interface{}, bool) + Store(key, value interface{}) + LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) + Delete(interface{}) + Range(func(key, value interface{}) (shouldContinue bool)) +} + +// RWMutexMap is an implementation of mapInterface using a sync.RWMutex. +type RWMutexMap struct { + mu sync.RWMutex + dirty map[interface{}]interface{} +} + +func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) { + m.mu.RLock() + value, ok = m.dirty[key] + m.mu.RUnlock() + return +} + +func (m *RWMutexMap) Store(key, value interface{}) { + m.mu.Lock() + if m.dirty == nil { + m.dirty = make(map[interface{}]interface{}) + } + m.dirty[key] = value + m.mu.Unlock() +} + +func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + m.mu.Lock() + actual, loaded = m.dirty[key] + if !loaded { + actual = value + if m.dirty == nil { + m.dirty = make(map[interface{}]interface{}) + } + m.dirty[key] = value + } + m.mu.Unlock() + return actual, loaded +} + +func (m *RWMutexMap) Delete(key interface{}) { + m.mu.Lock() + delete(m.dirty, key) + m.mu.Unlock() +} + +func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) { + m.mu.RLock() + keys := make([]interface{}, 0, len(m.dirty)) + for k := range m.dirty { + keys = append(keys, k) + } + m.mu.RUnlock() + + for _, k := range keys { + v, ok := m.Load(k) + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +// DeepCopyMap is an implementation of mapInterface using a Mutex and +// atomic.Value. It makes deep copies of the map on every write to avoid +// acquiring the Mutex in Load. 
+type DeepCopyMap struct { + mu sync.Mutex + clean atomic.Value +} + +func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + value, ok = clean[key] + return value, ok +} + +func (m *DeepCopyMap) Store(key, value interface{}) { + m.mu.Lock() + dirty := m.dirty() + dirty[key] = value + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + actual, loaded = clean[key] + if loaded { + return actual, loaded + } + + m.mu.Lock() + // Reload clean in case it changed while we were waiting on m.mu. + clean, _ = m.clean.Load().(map[interface{}]interface{}) + actual, loaded = clean[key] + if !loaded { + dirty := m.dirty() + dirty[key] = value + actual = value + m.clean.Store(dirty) + } + m.mu.Unlock() + return actual, loaded +} + +func (m *DeepCopyMap) Delete(key interface{}) { + m.mu.Lock() + dirty := m.dirty() + delete(dirty, key) + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + for k, v := range clean { + if !f(k, v) { + break + } + } +} + +func (m *DeepCopyMap) dirty() map[interface{}]interface{} { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + dirty := make(map[interface{}]interface{}, len(clean)+1) + for k, v := range clean { + dirty[k] = v + } + return dirty +} diff --git a/vendor/golang.org/x/sync/syncmap/map_test.go b/vendor/golang.org/x/sync/syncmap/map_test.go new file mode 100644 index 0000000000..c883f176f5 --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_test.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syncmap_test + +import ( + "math/rand" + "reflect" + "runtime" + "sync" + "testing" + "testing/quick" + + "golang.org/x/sync/syncmap" +) + +type mapOp string + +const ( + opLoad = mapOp("Load") + opStore = mapOp("Store") + opLoadOrStore = mapOp("LoadOrStore") + opDelete = mapOp("Delete") +) + +var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opDelete} + +// mapCall is a quick.Generator for calls on mapInterface. +type mapCall struct { + op mapOp + k, v interface{} +} + +func (c mapCall) apply(m mapInterface) (interface{}, bool) { + switch c.op { + case opLoad: + return m.Load(c.k) + case opStore: + m.Store(c.k, c.v) + return nil, false + case opLoadOrStore: + return m.LoadOrStore(c.k, c.v) + case opDelete: + m.Delete(c.k) + return nil, false + default: + panic("invalid mapOp") + } +} + +type mapResult struct { + value interface{} + ok bool +} + +func randValue(r *rand.Rand) interface{} { + b := make([]byte, r.Intn(4)) + for i := range b { + b[i] = 'a' + byte(rand.Intn(26)) + } + return string(b) +} + +func (mapCall) Generate(r *rand.Rand, size int) reflect.Value { + c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)} + switch c.op { + case opStore, opLoadOrStore: + c.v = randValue(r) + } + return reflect.ValueOf(c) +} + +func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) { + for _, c := range calls { + v, ok := c.apply(m) + results = append(results, mapResult{v, ok}) + } + + final = make(map[interface{}]interface{}) + m.Range(func(k, v interface{}) bool { + final[k] = v + return true + }) + + return results, final +} + +func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(syncmap.Map), calls) +} + +func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(RWMutexMap), calls) +} + +func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return 
applyCalls(new(DeepCopyMap), calls) +} + +func TestMapMatchesRWMutex(t *testing.T) { + if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil { + t.Error(err) + } +} + +func TestMapMatchesDeepCopy(t *testing.T) { + if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil { + t.Error(err) + } +} + +func TestConcurrentRange(t *testing.T) { + const mapSize = 1 << 10 + + m := new(syncmap.Map) + for n := int64(1); n <= mapSize; n++ { + m.Store(n, int64(n)) + } + + done := make(chan struct{}) + var wg sync.WaitGroup + defer func() { + close(done) + wg.Wait() + }() + for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { + r := rand.New(rand.NewSource(g)) + wg.Add(1) + go func(g int64) { + defer wg.Done() + for i := int64(0); ; i++ { + select { + case <-done: + return + default: + } + for n := int64(1); n < mapSize; n++ { + if r.Int63n(mapSize) == 0 { + m.Store(n, n*i*g) + } else { + m.Load(n) + } + } + } + }(g) + } + + iters := 1 << 10 + if testing.Short() { + iters = 16 + } + for n := iters; n > 0; n-- { + seen := make(map[int64]bool, mapSize) + + m.Range(func(ki, vi interface{}) bool { + k, v := ki.(int64), vi.(int64) + if v%k != 0 { + t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) + } + if seen[k] { + t.Fatalf("Range visited key %v twice", k) + } + seen[k] = true + return true + }) + + if len(seen) != mapSize { + t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) + } + } +}