-
Notifications
You must be signed in to change notification settings - Fork 558
E2E test - 50 nodes #2260
E2E test - 50 nodes #2260
Changes from 11 commits
472504a
95b4ab4
a61c0eb
1ef0e3b
de59b80
883c655
7640178
5cb10d3
f97bbc7
0a8e718
c8e1c70
d864494
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,39 @@ | ||
{ | ||
"apiVersion": "vlabs", | ||
"properties": { | ||
"orchestratorProfile": { | ||
"orchestratorType": "Kubernetes", | ||
"orchestratorRelease": "1.8" | ||
}, | ||
"masterProfile": { | ||
"count": 5, | ||
"dnsPrefix": "", | ||
"vmSize": "Standard_D2_v2", | ||
"OSDiskSizeGB": 200 | ||
}, | ||
"agentPoolProfiles": [ | ||
{ | ||
"name": "agentpool1", | ||
"count": 50, | ||
"vmSize": "Standard_D2_v2", | ||
"osType": "Linux", | ||
"storageProfile": "ManagedDisks", | ||
"availabilityProfile": "AvailabilitySet" | ||
} | ||
], | ||
"linuxProfile": { | ||
"adminUsername": "azureuser", | ||
"ssh": { | ||
"publicKeys": [ | ||
{ | ||
"keyData": "" | ||
} | ||
] | ||
} | ||
}, | ||
"servicePrincipalProfile": { | ||
"clientId": "", | ||
"secret": "" | ||
} | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -178,6 +178,18 @@ func (e *Engine) HasTiller() bool { | |
return false | ||
} | ||
|
||
// TillerMaxHistory will return true if tiller addon is enabled | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The comment isn't consistent with the code here |
||
func (e *Engine) TillerMaxHistory() string { | ||
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { | ||
if addon.Name == "tiller" { | ||
if addon.Config != nil { | ||
return addon.Config["max-history"] | ||
} | ||
} | ||
} | ||
return "5" | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why return 5 here? Why not "-1" or nil? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Actually just realized this is the default max history. In that case it should be inside the for loop (return 5 if addon tiller was found but config is nil) and then return -1 is no tiller addon was found. |
||
} | ||
|
||
// HasACIConnector will return true if aci-connector addon is enabled | ||
func (e *Engine) HasACIConnector() bool { | ||
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons { | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -30,7 +30,8 @@ type Connection struct { | |
func NewConnection(host, port, user, keyPath string) (*Connection, error) { | ||
conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")) | ||
if err != nil { | ||
log.Fatal(err) | ||
log.Println("unable to establish net connection, probably due to $SSH_AUTH_SOCK not being set") | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Here is it possible to add the value of SSH_AUTH_SOCK? Ie. ("unable to establish net connection, $SSH_AUTH_SOCK has value %s", os.Getenv("SSH_AUTH_SOCK")) |
||
return nil, err | ||
} | ||
defer conn.Close() | ||
ag := agent.NewClient(conn) | ||
|
@@ -51,7 +52,8 @@ func NewConnection(host, port, user, keyPath string) (*Connection, error) { | |
ag.Add(addKey) | ||
signers, err := ag.Signers() | ||
if err != nil { | ||
log.Fatal(err) | ||
log.Println("unable to add key to agent") | ||
return nil, err | ||
} | ||
auths := []ssh.AuthMethod{ssh.PublicKeys(signers...)} | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
In the docs (https://github.com/Azure/acs-engine/blob/master/docs/kubernetes-large-clusters.md)
We recommend the use of smaller pools (e.g., count of 20) over larger pools (e.g., count of 100); produce your desired total node count with lots of pools, as opposed to as few as possible.
Would it make sense to change this to several smaller pools since people are likely to look at example files and to copy them for their own implementation?There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@amanohar @khenidak Is the above guidance still valid? (i.e., don't use node pools > 20 count)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
i admit this answer is not as clear as i would love it to be
From one end 50 nodes with 4+ pools will hit ARM template limit (800 object). From another, I really don't see a relationship between # of nodes per pool and scale. As long as we don't hit more than 100 per availability set (or scale set) we are fine.
Many pools are better because users will get extra load balancers (extra public IPs) and a smaller blast radius in case of failures. They are, however, harder to manage.