Skip to content

Commit

Permalink
Merge pull request #1020 from jsturtevant/e2e-deployment-logic
Browse files Browse the repository at this point in the history
E2e deployment logic refactor
  • Loading branch information
k8s-ci-robot authored Nov 2, 2020
2 parents c613c3b + a724144 commit fd51606
Show file tree
Hide file tree
Showing 5 changed files with 239 additions and 213 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ test-e2e-local: $(ENVSUBST) $(KUBECTL) $(GINKGO) ## Run e2e tests
PULL_POLICY=IfNotPresent $(MAKE) docker-build
MANAGER_IMAGE=$(CONTROLLER_IMG)-$(ARCH):$(TAG) \
$(ENVSUBST) < $(E2E_CONF_FILE) > $(E2E_CONF_FILE_ENVSUBST) && \
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" -skip="${GINKGO_SKIP}" -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) ./test/e2e -- \
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" -skip="${GINKGO_SKIP}" -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) -stream --progress ./test/e2e -- \
-e2e.artifacts-folder="$(ARTIFACTS)" \
-e2e.config="$(E2E_CONF_FILE_ENVSUBST)" \
-e2e.skip-resource-cleanup=$(SKIP_CLEANUP) -e2e.use-existing-cluster=$(SKIP_CREATE_MGMT_CLUSTER)
Expand Down
227 changes: 55 additions & 172 deletions test/e2e/azure_lb.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@ import (
k8snet "k8s.io/utils/net"
"net"
"regexp"
deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/job"

retryablehttp "github.com/hashicorp/go-retryablehttp"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
Expand Down Expand Up @@ -66,89 +66,39 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
Expect(clientset).NotTo(BeNil())

By("creating an Apache HTTP deployment")
deploymentsClient := clientset.AppsV1().Deployments(corev1.NamespaceDefault)
var replicas int32 = 1
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "httpd",
Namespace: corev1.NamespaceDefault,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "httpd",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "httpd",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "web",
Image: "httpd",
Ports: []corev1.ContainerPort{
{
Name: "http",
Protocol: corev1.ProtocolTCP,
ContainerPort: 80,
},
},
},
},
},
},
},
}
_, err := deploymentsClient.Create(deployment)
webDeployment := deploymentBuilder.CreateDeployment("httpd", "web", corev1.NamespaceDefault)
webDeployment.AddContainerPort("httpd", "http", 80, corev1.ProtocolTCP)
deployment, err := webDeployment.Deploy(clientset)
Expect(err).NotTo(HaveOccurred())
deployInput := WaitForDeploymentsAvailableInput{
Getter: deploymentsClientAdapter{client: deploymentsClient},
Getter: deploymentsClientAdapter{client: webDeployment.Client(clientset)},
Deployment: deployment,
Clientset: clientset,
}
WaitForDeploymentsAvailable(context.TODO(), deployInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)

servicesClient := clientset.CoreV1().Services(corev1.NamespaceDefault)
jobsClient := clientset.BatchV1().Jobs(corev1.NamespaceDefault)
jobName := "curl-to-ilb-job"
ilbName := "httpd-ilb"

ports := []corev1.ServicePort{
{
Name: "http",
Port: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
},
}

// TODO: fix and enable this. Internal LBs + IPv6 is currently in preview.
// https://docs.microsoft.com/en-us/azure/virtual-network/ipv6-dual-stack-standard-internal-load-balancer-powershell
if !input.IPv6 {
By("creating an internal Load Balancer service")
ilbService := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: ilbName,
Namespace: corev1.NamespaceDefault,
Annotations: map[string]string{
"service.beta.kubernetes.io/azure-load-balancer-internal": "true",
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
"app": "httpd",
},
},
}

ilbService := webDeployment.GetService(ports, deploymentBuilder.InternalLoadbalancer)
_, err = servicesClient.Create(ilbService)
Expect(err).NotTo(HaveOccurred())
ilbSvcInput := WaitForServiceAvailableInput{
Expand All @@ -160,42 +110,11 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {

By("connecting to the internal LB service from a curl pod")

svc, err := servicesClient.Get(ilbName, metav1.GetOptions{})
svc, err := servicesClient.Get(ilbService.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
var ilbIP string
for _, i := range svc.Status.LoadBalancer.Ingress {
if net.ParseIP(i.IP) != nil {
if k8snet.IsIPv6String(i.IP) {
ilbIP = fmt.Sprintf("[%s]", i.IP)
break
}
ilbIP = i.IP
break
}
}
ilbJob := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: corev1.NamespaceDefault,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
{
Name: "curl",
Image: "curlimages/curl",
Command: []string{
"curl",
ilbIP,
},
},
},
},
},
},
}
ilbIP := extractServiceIp(svc)

ilbJob := job.CreateCurlJob("curl-to-ilb-job", ilbIP)
_, err = jobsClient.Create(ilbJob)
Expect(err).NotTo(HaveOccurred())
ilbJobInput := WaitForJobCompleteInput{
Expand All @@ -204,33 +123,18 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
Clientset: clientset,
}
WaitForJobComplete(context.TODO(), ilbJobInput, e2eConfig.GetIntervals(specName, "wait-job")...)

if !input.SkipCleanup {
By("deleting the ilb test resources")
err = servicesClient.Delete(ilbService.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
err = jobsClient.Delete(ilbJob.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
}

By("creating an external Load Balancer service")
elbService := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "httpd-elb",
Namespace: corev1.NamespaceDefault,
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
"app": "httpd",
},
},
}
elbService := webDeployment.GetService(ports, deploymentBuilder.ExternalLoadbalancer)
_, err = servicesClient.Create(elbService)
Expect(err).NotTo(HaveOccurred())
elbSvcInput := WaitForServiceAvailableInput{
Expand All @@ -241,42 +145,11 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
WaitForServiceAvailable(context.TODO(), elbSvcInput, e2eConfig.GetIntervals(specName, "wait-service")...)

By("connecting to the external LB service from a curl pod")
svc, err := servicesClient.Get("httpd-elb", metav1.GetOptions{})
svc, err := servicesClient.Get(elbService.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
var elbIP string
for _, i := range svc.Status.LoadBalancer.Ingress {
if net.ParseIP(i.IP) != nil {
if k8snet.IsIPv6String(i.IP) {
elbIP = fmt.Sprintf("[%s]", i.IP)
break
}
elbIP = i.IP
break
}
}
elbJob := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "curl-to-elb-job",
Namespace: corev1.NamespaceDefault,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
{
Name: "curl",
Image: "curlimages/curl",
Command: []string{
"curl",
elbIP,
},
},
},
},
},
},
}

elbIP := extractServiceIp(svc)
elbJob := job.CreateCurlJob("curl-to-elb-job", elbIP)
_, err = jobsClient.Create(elbJob)
Expect(err).NotTo(HaveOccurred())
elbJobInput := WaitForJobCompleteInput{
Expand Down Expand Up @@ -305,16 +178,26 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
return
}
By("deleting the test resources")
if !input.IPv6 {
err = servicesClient.Delete(ilbName, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
err = jobsClient.Delete(jobName, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
err = servicesClient.Delete(elbService.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
err = deploymentsClient.Delete(deployment.Name, &metav1.DeleteOptions{})
err = webDeployment.Client(clientset).Delete(deployment.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
err = jobsClient.Delete(elbJob.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}

// extractServiceIp returns the first valid load-balancer ingress IP assigned
// to svc, formatted for use in a URL: IPv6 addresses are wrapped in brackets
// (e.g. "[fd00::1]") so callers can pass the result directly to curl. It
// returns the empty string when no ingress entry holds a parseable IP.
//
// Note: the helper is generic — it is used for both the internal and the
// external load-balancer services — so locals avoid the ilb-specific naming.
func extractServiceIp(svc *corev1.Service) string {
	for _, ingress := range svc.Status.LoadBalancer.Ingress {
		// Skip ingress entries that do not carry a valid IP (e.g. hostnames).
		if net.ParseIP(ingress.IP) == nil {
			continue
		}
		if k8snet.IsIPv6String(ingress.IP) {
			// Bracket IPv6 literals so they can be used in host:port / URL form.
			return fmt.Sprintf("[%s]", ingress.IP)
		}
		return ingress.IP
	}
	return ""
}
38 changes: 28 additions & 10 deletions test/e2e/azure_net_pol.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

e2e_deployment "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
e2e_networkpolicy "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/networkpolicy"

Expand Down Expand Up @@ -87,20 +87,38 @@ func AzureNetPolSpec(ctx context.Context, inputGetter func() AzureNetPolSpecInpu
By("Creating frontendProd, backend and network-policy pod deployments")
r := rand.New(rand.NewSource(time.Now().UnixNano()))
randInt := r.Intn(99999)
frontendProdDeploymentName := fmt.Sprintf("frontend-prod-%v", randInt)

// Front end
frontendLabels := map[string]string{"app": "webapp", "role": "frontend"}
frontendProdDeployment, err := e2e_deployment.CreateLinuxDeployment(clientset, "library/nginx:latest", frontendProdDeploymentName, namespaceProd.GetName(), frontendLabels)

// Front end Prod
frontendProdDeploymentName := fmt.Sprintf("frontend-prod-%v", randInt)
frontEndProd := deploymentBuilder.CreateDeployment("library/nginx:latest", frontendProdDeploymentName, namespaceProd.GetName())
frontEndProd.AddLabels(frontendLabels)
frontendProdDeployment, err := frontEndProd.Deploy(clientset)
Expect(err).NotTo(HaveOccurred())

// Front end Dev
frontendDevDeploymentName := fmt.Sprintf("frontend-dev-%v", randInt+100000)
frontendDevDeployment, err := e2e_deployment.CreateLinuxDeployment(clientset, "library/nginx:latest", frontendDevDeploymentName, namespaceDev.GetName(), frontendLabels)
frontEndDev := deploymentBuilder.CreateDeployment("library/nginx:latest", frontendDevDeploymentName, namespaceDev.GetName())
frontEndDev.AddLabels(frontendLabels)
frontendDevDeployment, err := frontEndDev.Deploy(clientset)
Expect(err).NotTo(HaveOccurred())

// Backend
backendDeploymentName := fmt.Sprintf("backend-%v", randInt+200000)
backendLabels := map[string]string{"app": "webapp", "role": "backend"}
backendDeployment, err := e2e_deployment.CreateLinuxDeployment(clientset, "library/nginx:latest", backendDeploymentName, namespaceDev.GetName(), backendLabels)
backendDev := deploymentBuilder.CreateDeployment("library/nginx:latest", backendDeploymentName, namespaceDev.GetName())
backendDev.AddLabels(backendLabels)
backendDeployment, err := backendDev.Deploy(clientset)
Expect(err).NotTo(HaveOccurred())

// Network policy
nwpolicyDeploymentName := fmt.Sprintf("network-policy-%v", randInt+300000)
nwpolicyLabels := map[string]string{"app": "webapp", "role": "any"}
nwpolicyDeployment, err := e2e_deployment.CreateLinuxDeployment(clientset, "library/nginx:latest", nwpolicyDeploymentName, namespaceDev.GetName(), nwpolicyLabels)
nwpolicy := deploymentBuilder.CreateDeployment("library/nginx:latest", nwpolicyDeploymentName, namespaceDev.GetName())
nwpolicy.AddLabels(nwpolicyLabels)
nwpolicyDeployment, err := nwpolicy.Deploy(clientset)
Expect(err).NotTo(HaveOccurred())

By("Ensure there is a running frontend-prod pod")
Expand Down Expand Up @@ -132,22 +150,22 @@ func AzureNetPolSpec(ctx context.Context, inputGetter func() AzureNetPolSpecInpu
framework.WaitForDeploymentsAvailable(context.TODO(), nwpolicyDeploymentInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)

By("Ensuring we have outbound internet access from the frontend-prod pods")
frontendProdPods, err := e2e_deployment.GetPodsFromDeployment(clientset, frontendProdDeployment)
frontendProdPods, err := frontEndProd.GetPodsFromDeployment(clientset)
Expect(err).NotTo(HaveOccurred())
e2e_networkpolicy.EnsureOutboundInternetAccess(clientset, config, frontendProdPods)

By("Ensuring we have outbound internet access from the frontend-dev pods")
frontendDevPods, err := e2e_deployment.GetPodsFromDeployment(clientset, frontendDevDeployment)
frontendDevPods, err := frontEndDev.GetPodsFromDeployment(clientset)
Expect(err).NotTo(HaveOccurred())
e2e_networkpolicy.EnsureOutboundInternetAccess(clientset, config, frontendDevPods)

By("Ensuring we have outbound internet access from the backend pods")
backendPods, err := e2e_deployment.GetPodsFromDeployment(clientset, backendDeployment)
backendPods, err := backendDev.GetPodsFromDeployment(clientset)
Expect(err).NotTo(HaveOccurred())
e2e_networkpolicy.EnsureOutboundInternetAccess(clientset, config, backendPods)

By("Ensuring we have outbound internet access from the network-policy pods")
nwpolicyPods, err := e2e_deployment.GetPodsFromDeployment(clientset, nwpolicyDeployment)
nwpolicyPods, err := nwpolicy.GetPodsFromDeployment(clientset)
Expect(err).NotTo(HaveOccurred())
e2e_networkpolicy.EnsureOutboundInternetAccess(clientset, config, nwpolicyPods)

Expand Down
Loading

0 comments on commit fd51606

Please sign in to comment.