E2E Addons (Azure#2294)
* add features on and features off

* fix off model

* add separate tests for each feature disabled

* move features off dir

* rbac bool

* added clear containers

* added addons enabled test

* fix typo in apimodel

* remove aci-connector

* wip add mem/cpu limits/requests checks

* add resources to container spec

* fix resources type

* add checks to tiller

* remove extra err var

* add check for dashboard and aci connector

* update default definition

* fmt

* fix typo

* Refactor resources validation

* fix error string

* fix linter

* remove pointer

* fix ineffassign

* small fixes
Cecile Robert-Michon authored and Terje Torkelsen committed Mar 15, 2018
1 parent 8481a9d commit 021a079
Showing 4 changed files with 121 additions and 55 deletions.
17 changes: 12 additions & 5 deletions examples/e2e-tests/kubernetes/release/default/definition.json
@@ -16,9 +16,9 @@
{
"name": "tiller",
"cpuRequests": "1",
"memoryRequests": "1024Mi",
"memoryRequests": "1Gi",
"cpuLimits": "1",
"memoryLimits": "1024Mi"
"memoryLimits": "1Gi"
}
]
},
@@ -66,8 +66,13 @@
"count": 3,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"storageProfile" : "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128],
"storageProfile": "ManagedDisks",
"diskSizesGB": [
128,
128,
128,
128
],
"availabilityProfile": "AvailabilitySet",
"vnetSubnetId": "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME"
},
@@ -77,7 +82,9 @@
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"storageProfile": "StorageAccount",
"diskSizesGB": [128],
"diskSizesGB": [
128
],
"availabilityProfile": "AvailabilitySet",
"vnetSubnetId": "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME"
}
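
The tiller block above is what the resource checks further down consume: roughly, it deserializes into the acs-engine api types referenced in the Go diffs below. A minimal sketch, assuming the resource field names shown in this commit's diffs; the Name field on the container spec and the exact constructor shape are illustrative, not taken from the diff:

package example

import "github.com/Azure/acs-engine/pkg/api"

// exampleTillerAddon mirrors the "tiller" addon entry from definition.json above.
// The resource field names come from this commit's diffs; the Name field on
// KubernetesContainerSpec is assumed from the JSON "name" key.
func exampleTillerAddon() api.KubernetesAddon {
	enabled := true
	return api.KubernetesAddon{
		Name:    "tiller",
		Enabled: &enabled, // *bool; HasAddon dereferences it
		Containers: []api.KubernetesContainerSpec{
			{
				Name:           "tiller",
				CPURequests:    "1",
				MemoryRequests: "1Gi",
				CPULimits:      "1",
				MemoryLimits:   "1Gi",
			},
		},
	}
}
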
36 changes: 3 additions & 33 deletions test/e2e/engine/template.go
@@ -158,46 +158,16 @@ func (e *Engine) HasWindowsAgents() bool {
return false
}

// HasDashboard will return true if kubernetes-dashboard addon is enabled
func (e *Engine) HasDashboard() bool {
// HasAddon will return true if an addon is enabled
func (e *Engine) HasAddon(name string) (bool, api.KubernetesAddon) {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "kubernetes-dashboard" {
return *addon.Enabled
}
}
return false
}

// HasTiller will return true if tiller addon is enabled
func (e *Engine) HasTiller() (bool, api.KubernetesAddon) {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "tiller" {
if addon.Name == name {
return *addon.Enabled, addon
}
}
return false, api.KubernetesAddon{}
}

// HasACIConnector will return true if aci-connector addon is enabled
func (e *Engine) HasACIConnector() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "aci-connector" {
return *addon.Enabled
}
}
return false
}

// HasRescheduler will return true if rescheduler addon is enabled
func (e *Engine) HasRescheduler() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "rescheduler" {
return *addon.Enabled
}
}
return false
}

// OrchestratorVersion1Dot8AndUp will return true if the orchestrator version is 1.8 and up
func (e *Engine) OrchestratorVersion1Dot8AndUp() bool {
return e.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion >= "1.8"
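
With the per-addon helpers collapsed into HasAddon, callers check any addon by name and get its expanded definition back in one call. A hedged usage sketch; the wrapper function and the engine import path are illustrative, not part of the commit:

package example

import (
	"fmt"

	"github.com/Azure/acs-engine/test/e2e/engine"
)

// reportAddon is a hypothetical caller of the consolidated HasAddon helper.
func reportAddon(eng *engine.Engine, name string) {
	if has, addon := eng.HasAddon(name); has {
		// The returned api.KubernetesAddon carries the Config map and the
		// Containers resource specs that the tests below assert against.
		fmt.Printf("%s is enabled with %d container spec(s)\n", name, len(addon.Containers))
	} else {
		// Disabled or unknown addons come back as (false, api.KubernetesAddon{}).
		fmt.Printf("%s is disabled for this cluster\n", name)
	}
}
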
54 changes: 40 additions & 14 deletions test/e2e/kubernetes/kubernetes_test.go
@@ -125,27 +125,28 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})

It("should have tiller running", func() {
if hasTiller, tillerAddon := eng.HasTiller(); hasTiller {
if hasTiller, tillerAddon := eng.HasAddon("tiller"); hasTiller {
running, err := pod.WaitOnReady("tiller", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
if tillerAddon.Config != nil {
By("Ensuring that the correct max-history has been applied")
maxHistory := tillerAddon.Config["max-history"]
pods, err := pod.GetAllByPrefix("tiller-deploy", "kube-system")
Expect(err).NotTo(HaveOccurred())
// There is only one tiller pod and one container in that pod.
actualTillerMaxHistory, err := pods[0].Spec.Containers[0].GetEnvironmentVariable("TILLER_HISTORY_MAX")
Expect(err).NotTo(HaveOccurred())
Expect(actualTillerMaxHistory).To(Equal(maxHistory))
}
pods, err := pod.GetAllByPrefix("tiller-deploy", "kube-system")
Expect(err).NotTo(HaveOccurred())
By("Ensuring that the correct max-history has been applied")
maxHistory := tillerAddon.Config["max-history"]
// There is only one tiller pod and one container in that pod
actualTillerMaxHistory, err := pods[0].Spec.Containers[0].GetEnvironmentVariable("TILLER_HISTORY_MAX")
Expect(err).NotTo(HaveOccurred())
Expect(actualTillerMaxHistory).To(Equal(maxHistory))
By("Ensuring that the correct resources have been applied")
err = pods[0].Spec.Containers[0].ValidateResources(tillerAddon.Containers[0])
Expect(err).NotTo(HaveOccurred())
} else {
Skip("tiller disabled for this cluster, will not test")
}
})

It("should be able to access the dashboard from each node", func() {
if eng.HasDashboard() {
if hasDashboard, dashboardAddon := eng.HasAddon("kubernetes-dashboard"); hasDashboard {
By("Ensuring that the kubernetes-dashboard pod is Running")

running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 3, 30*time.Second, cfg.Timeout)
@@ -201,27 +202,52 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
}
Expect(success).To(BeTrue())
}
By("Ensuring that the correct resources have been applied")
// Assuming one dashboard pod
pods, err := pod.GetAllByPrefix("kubernetes-dashboard", "kube-system")
Expect(err).NotTo(HaveOccurred())
for i, c := range dashboardAddon.Containers {
err := pods[0].Spec.Containers[i].ValidateResources(c)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("kubernetes-dashboard disabled for this cluster, will not test")
}
})

It("should have aci-connector running", func() {
if eng.HasACIConnector() {
if hasACIConnector, ACIConnectorAddon := eng.HasAddon("aci-connector"); hasACIConnector {
running, err := pod.WaitOnReady("aci-connector", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Ensuring that the correct resources have been applied")
// Assuming one aci-connector pod
pods, err := pod.GetAllByPrefix("aci-connector", "kube-system")
Expect(err).NotTo(HaveOccurred())
for i, c := range ACIConnectorAddon.Containers {
err := pods[0].Spec.Containers[i].ValidateResources(c)
Expect(err).NotTo(HaveOccurred())
}

} else {
Skip("aci-connector disabled for this cluster, will not test")
}
})

It("should have rescheduler running", func() {
if eng.HasRescheduler() {
if hasRescheduler, reschedulerAddon := eng.HasAddon("rescheduler"); hasRescheduler {
running, err := pod.WaitOnReady("rescheduler", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Ensuring that the correct resources have been applied")
// Assuming one rescheduler pod
pods, err := pod.GetAllByPrefix("rescheduler", "kube-system")
Expect(err).NotTo(HaveOccurred())
for i, c := range reschedulerAddon.Containers {
err := pods[0].Spec.Containers[i].ValidateResources(c)
Expect(err).NotTo(HaveOccurred())
}
} else {
Skip("rescheduler disabled for this cluster, will not test")
}
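
Each of the addon tests above repeats the same validation shape: fetch the addon's pod by prefix, then compare each running container against the addon's container specs, matched by index. A sketch of that shared pattern as a standalone helper; the function itself, the import paths, and the single-pod assumption are illustrative only:

package example

import (
	"fmt"

	"github.com/Azure/acs-engine/pkg/api"
	"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
)

// validateAddonResources captures the loop repeated in the tests above:
// look up the addon's pod by prefix and validate each container's resources
// against the corresponding api.KubernetesContainerSpec, paired by index.
func validateAddonResources(prefix, namespace string, addon api.KubernetesAddon) error {
	pods, err := pod.GetAllByPrefix(prefix, namespace)
	if err != nil {
		return err
	}
	if len(pods) == 0 {
		return fmt.Errorf("no %q pods found in namespace %q", prefix, namespace)
	}
	// Like the tests, this assumes a single addon pod whose containers are
	// declared in the same order as the addon's Containers slice.
	for i, c := range addon.Containers {
		if err := pods[0].Spec.Containers[i].ValidateResources(c); err != nil {
			return err
		}
	}
	return nil
}
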
69 changes: 66 additions & 3 deletions test/e2e/kubernetes/pod/pod.go
@@ -11,6 +11,7 @@ import (
"strings"
"time"

"github.com/Azure/acs-engine/pkg/api"
"github.com/Azure/acs-engine/test/e2e/kubernetes/util"
)

@@ -45,9 +46,10 @@

// Container holds information like image and ports
type Container struct {
Image string `json:"image"`
Ports []Port `json:"ports"`
Env []EnvVar `json:"env"`
Image string `json:"image"`
Ports []Port `json:"ports"`
Env []EnvVar `json:"env"`
Resources Resources `json:"resources"`
}

// EnvVar holds environment variables
@@ -62,6 +64,24 @@ type Port struct {
HostPort int `json:"hostPort"`
}

// Resources represents a container resources definition
type Resources struct {
Requests Requests `json:"requests"`
Limits Limits `json:"limits"`
}

// Requests represents container resource requests
type Requests struct {
CPU string `json:"cpu"`
Memory string `json:"memory"`
}

// Limits represents container resource limits
type Limits struct {
CPU string `json:"cpu"`
Memory string `json:"memory"`
}

// Status holds information like hostIP and phase
type Status struct {
HostIP string `json:"hostIP"`
@@ -416,6 +436,29 @@ func (p *Pod) ValidateAzureFile(mountPath string, sleep, duration time.Duration)
}
}

// ValidateResources checks that a container has the expected memory/cpu limits and requests
func (c *Container) ValidateResources(a api.KubernetesContainerSpec) error {
expectedCPURequests := a.CPURequests
expectedCPULimits := a.CPULimits
expectedMemoryRequests := a.MemoryRequests
expectedMemoryLimits := a.MemoryLimits
actualCPURequests := c.getCPURequests()
actualCPULimits := c.getCPULimits()
actualMemoryRequests := c.getMemoryRequests()
actualLimits := c.getMemoryLimits()
if expectedCPURequests != actualCPURequests {
return fmt.Errorf("expected CPU requests %s does not match %s", expectedCPURequests, actualCPURequests)
} else if expectedCPULimits != actualCPULimits {
return fmt.Errorf("expected CPU limits %s does not match %s", expectedCPULimits, actualCPULimits)
} else if expectedMemoryRequests != actualMemoryRequests {
return fmt.Errorf("expected Memory requests %s does not match %s", expectedMemoryRequests, actualMemoryRequests)
} else if expectedMemoryLimits != actualLimits {
return fmt.Errorf("expected Memory limits %s does not match %s", expectedMemoryLimits, actualLimits)
} else {
return nil
}
}

// GetEnvironmentVariable returns an environment variable value from a container within a pod
func (c *Container) GetEnvironmentVariable(varName string) (string, error) {
for _, envvar := range c.Env {
@@ -425,3 +468,23 @@ func (c *Container) GetEnvironmentVariable(varName string) (string, error) {
}
return "", errors.New("environment variable not found")
}

// getCPURequests returns the CPU requests value from a container within a pod
func (c *Container) getCPURequests() string {
return c.Resources.Requests.CPU
}

// getCPULimits returns the CPU limits value from a container within a pod
func (c *Container) getCPULimits() string {
return c.Resources.Limits.CPU
}

// getMemoryRequests returns the memory requests value from a container within a pod
func (c *Container) getMemoryRequests() string {
return c.Resources.Requests.Memory
}

// getMemoryLimits returns the memory limits value from a container within a pod
func (c *Container) getMemoryLimits() string {
return c.Resources.Limits.Memory
}
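
The new Resources, Requests, and Limits types mirror the resources block of a container in the pod JSON these structs are unmarshalled from, and ValidateResources compares those values field by field against the addon's container spec. A small self-contained sketch; the JSON snippet, the image placeholder, and the expected values are illustrative:

package example

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/acs-engine/pkg/api"
	"github.com/Azure/acs-engine/test/e2e/kubernetes/pod"
)

func exampleValidateResources() {
	// A container entry shaped like the pod JSON these structs unmarshal from;
	// the image value is a placeholder.
	raw := []byte(`{
		"image": "example/tiller:placeholder",
		"resources": {
			"requests": {"cpu": "1", "memory": "1Gi"},
			"limits": {"cpu": "1", "memory": "1Gi"}
		}
	}`)
	var c pod.Container
	if err := json.Unmarshal(raw, &c); err != nil {
		log.Fatal(err)
	}
	// Expected values as they would arrive from the addon's container spec
	// (compare the tiller entry in definition.json earlier in this commit).
	expected := api.KubernetesContainerSpec{
		CPURequests:    "1",
		MemoryRequests: "1Gi",
		CPULimits:      "1",
		MemoryLimits:   "1Gi",
	}
	if err := c.ValidateResources(expected); err != nil {
		fmt.Println("resource mismatch:", err)
		return
	}
	fmt.Println("resources match the addon spec")
}
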
