This repository has been archived by the owner on Jan 11, 2023. It is now read-only.

Specifying 256GB instead of 128 for etcd disk #2435

Merged · 6 commits · Apr 2, 2018
1 change: 1 addition & 0 deletions docs/clusterdefinition.md
@@ -41,6 +41,7 @@ Here are the valid values for the orchestrator types:
|enableRbac|no|Enable [Kubernetes RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) (boolean - default == true) |
|enableAggregatedAPIs|no|Enable [Kubernetes Aggregated APIs](https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/). This is required by [Service Catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md). (boolean - default == false) |
|enableDataEncryptionAtRest|no|Enable [Kubernetes data encryption at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). This is currently an alpha feature. (boolean - default == false) |
|etcdDiskSizeGB|no|Size in GB to assign to the etcd data volume. Defaults (if no user value is provided) are: 256 GB for clusters up to 3 nodes; 512 GB for clusters with 4 to 10 nodes; 1024 GB for clusters with 11 to 20 nodes; and 2048 GB for clusters with more than 20 nodes|
|privateCluster|no|Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster).|
|maxPods|no|The maximum number of pods per node. The minimum valid value, necessary for running kube-system pods, is 5. Default value is 30 when networkPolicy equals azure, 110 otherwise.|
|gcHighThreshold|no|Sets the --image-gc-high-threshold value on the kubelet configuration. Default is 85. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) |
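For illustration, the tiering described in the `etcdDiskSizeGB` row above can be sketched as a small standalone Go helper. The helper name and the standalone program are assumptions for readability only; in the PR itself the defaulting happens inside `setOrchestratorDefaults` (see the `pkg/acsengine/defaults.go` hunk below) and is applied only when the user has not set `etcdDiskSizeGB`.

```go
package main

import "fmt"

// defaultEtcdDiskSizeGB mirrors the documented node-count tiers.
// Hypothetical helper for illustration; not part of the acs-engine code.
func defaultEtcdDiskSizeGB(totalNodes int) string {
	switch {
	case totalNodes > 20:
		return "2048"
	case totalNodes > 10:
		return "1024"
	case totalNodes > 3:
		return "512"
	default:
		return "256"
	}
}

func main() {
	// Node counts straddling each threshold.
	for _, n := range []int{1, 3, 4, 10, 11, 20, 21} {
		fmt.Printf("%2d nodes -> %s GB\n", n, defaultEtcdDiskSizeGB(n))
	}
}
```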
8 changes: 7 additions & 1 deletion pkg/acsengine/const.go
@@ -105,7 +105,13 @@ const (
// DefaultEtcdVersion specifies the default etcd version to install
DefaultEtcdVersion = "3.2.16"
// DefaultEtcdDiskSize specifies the default size for Kubernetes master etcd disk volumes in GB
-	DefaultEtcdDiskSize = "128"
+	DefaultEtcdDiskSize = "256"
+	// DefaultEtcdDiskSizeGT3Nodes = size for Kubernetes master etcd disk volumes in GB if > 3 nodes
+	DefaultEtcdDiskSizeGT3Nodes = "512"
+	// DefaultEtcdDiskSizeGT10Nodes = size for Kubernetes master etcd disk volumes in GB if > 10 nodes
+	DefaultEtcdDiskSizeGT10Nodes = "1024"
+	// DefaultEtcdDiskSizeGT20Nodes = size for Kubernetes master etcd disk volumes in GB if > 20 nodes
+	DefaultEtcdDiskSizeGT20Nodes = "2048"
// DefaultReschedulerAddonName is the name of the rescheduler addon deployment
DefaultReschedulerAddonName = "rescheduler"
// DefaultMetricsServerAddonName is the name of the kubernetes Metrics server addon deployment
11 changes: 10 additions & 1 deletion pkg/acsengine/defaults.go
@@ -446,7 +446,16 @@ func setOrchestratorDefaults(cs *api.ContainerService) {
}

if "" == a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB {
-		a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSize
+		switch {
+		case a.TotalNodes() > 20:
+			a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSizeGT20Nodes
+		case a.TotalNodes() > 10:
+			a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSizeGT10Nodes
+		case a.TotalNodes() > 3:
+			a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSizeGT3Nodes
+		default:
+			a.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = DefaultEtcdDiskSize
+		}
}

if a.OrchestratorProfile.KubernetesConfig.PrivateJumpboxProvision() && a.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.OSDiskSizeGB == 0 {
57 changes: 57 additions & 0 deletions pkg/acsengine/defaults_test.go
@@ -326,6 +326,63 @@ func TestKubeletFeatureGatesEnsureMasterAndAgentConfigUsedFor1_6_0(t *testing.T)
}
}

func TestEtcdDiskSize(t *testing.T) {
mockCS := getMockBaseContainerService("1.8.10")
properties := mockCS.Properties
properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
properties.MasterProfile.Count = 1
setOrchestratorDefaults(&mockCS)
if properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB != DefaultEtcdDiskSize {
t.Fatalf("EtcdDiskSizeGB did not have the expected size, got %s, expected %s",
properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB, DefaultEtcdDiskSize)
}

mockCS = getMockBaseContainerService("1.8.10")
properties = mockCS.Properties
properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
properties.MasterProfile.Count = 5
setOrchestratorDefaults(&mockCS)
if properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB != DefaultEtcdDiskSizeGT3Nodes {
t.Fatalf("EtcdDiskSizeGB did not have the expected size, got %s, expected %s",
properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB, DefaultEtcdDiskSizeGT3Nodes)
}

mockCS = getMockBaseContainerService("1.8.10")
properties = mockCS.Properties
properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
properties.MasterProfile.Count = 5
properties.AgentPoolProfiles[0].Count = 6
setOrchestratorDefaults(&mockCS)
if properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB != DefaultEtcdDiskSizeGT10Nodes {
t.Fatalf("EtcdDiskSizeGB did not have the expected size, got %s, expected %s",
properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB, DefaultEtcdDiskSizeGT10Nodes)
}

mockCS = getMockBaseContainerService("1.8.10")
properties = mockCS.Properties
properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
properties.MasterProfile.Count = 5
properties.AgentPoolProfiles[0].Count = 16
setOrchestratorDefaults(&mockCS)
if properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB != DefaultEtcdDiskSizeGT20Nodes {
t.Fatalf("EtcdDiskSizeGB did not have the expected size, got %s, expected %s",
properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB, DefaultEtcdDiskSizeGT20Nodes)
}

mockCS = getMockBaseContainerService("1.8.10")
properties = mockCS.Properties
properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
properties.MasterProfile.Count = 5
properties.AgentPoolProfiles[0].Count = 50
customEtcdDiskSize := "512"
properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB = customEtcdDiskSize
setOrchestratorDefaults(&mockCS)
if properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB != customEtcdDiskSize {
t.Fatalf("EtcdDiskSizeGB did not have the expected size, got %s, expected %s",
properties.OrchestratorProfile.KubernetesConfig.EtcdDiskSizeGB, customEtcdDiskSize)
}
}

func getMockAddon(name string) api.KubernetesAddon {
return api.KubernetesAddon{
Name: name,
9 changes: 1 addition & 8 deletions pkg/acsengine/engine.go
@@ -676,14 +676,7 @@ func getParameters(cs *api.ContainerService, isClassicMode bool, generatorCode s
addValue(parametersMap, "jumpboxStorageProfile", cs.Properties.OrchestratorProfile.KubernetesConfig.PrivateCluster.JumpboxProfile.StorageProfile)
}
if cs.Properties.HostedMasterProfile == nil {
-		var totalNodes int
-		if cs.Properties.MasterProfile != nil {
-			totalNodes = cs.Properties.MasterProfile.Count
-		}
-		for _, pool := range cs.Properties.AgentPoolProfiles {
-			totalNodes = totalNodes + pool.Count
-		}
-		addValue(parametersMap, "totalNodes", totalNodes)
+		addValue(parametersMap, "totalNodes", cs.Properties.TotalNodes())
}

if properties.OrchestratorProfile.KubernetesConfig == nil ||
12 changes: 12 additions & 0 deletions pkg/api/types.go
@@ -564,6 +564,18 @@ func (p *Properties) HasStorageAccountDisks() bool {
return false
}

// TotalNodes returns the total number of nodes in the cluster configuration
func (p *Properties) TotalNodes() int {
var totalNodes int
if p.MasterProfile != nil {
totalNodes = p.MasterProfile.Count
}
for _, pool := range p.AgentPoolProfiles {
totalNodes = totalNodes + pool.Count
}
return totalNodes
}

// IsCustomVNET returns true if the customer brought their own VNET
func (m *MasterProfile) IsCustomVNET() bool {
return len(m.VnetSubnetID) > 0
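A minimal usage sketch of `TotalNodes`, assuming the acs-engine `api` package layout shown above (a `MasterProfile.Count` field and a slice of agent pool profiles each with a `Count`). Note that masters are included in the total, which is why the test above crosses the >10 tier with a 5-master, 6-agent cluster.

```go
package main

import (
	"fmt"

	"github.com/Azure/acs-engine/pkg/api"
)

func main() {
	// Assumed field layout: MasterProfile.Count plus one Count per agent pool.
	p := &api.Properties{
		MasterProfile:     &api.MasterProfile{Count: 3},
		AgentPoolProfiles: []*api.AgentPoolProfile{{Count: 4}, {Count: 6}},
	}
	fmt.Println(p.TotalNodes()) // 13: masters count toward the etcd disk size tiers
}
```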