From 03227c2dee6b8bf4d7117ab739bb2df8b129e7cb Mon Sep 17 00:00:00 2001 From: "Beata Lach (Skiba)" Date: Mon, 15 Jun 2020 18:47:27 +0200 Subject: [PATCH] Fixes needed after e2e deps update --- vertical-pod-autoscaler/e2e/utils/webhook.go | 51 +-- vertical-pod-autoscaler/e2e/v1/actuation.go | 52 ++- .../e2e/v1/admission_controller.go | 7 +- .../e2e/v1/autoscaling_utils.go | 88 ++--- vertical-pod-autoscaler/e2e/v1/common.go | 21 +- vertical-pod-autoscaler/e2e/v1/e2e.go | 312 +++++++++++++++++- vertical-pod-autoscaler/e2e/v1/e2e_test.go | 3 +- vertical-pod-autoscaler/e2e/v1/full_vpa.go | 7 +- vertical-pod-autoscaler/e2e/v1/recommender.go | 9 +- vertical-pod-autoscaler/e2e/v1/updater.go | 3 +- .../e2e/v1beta2/actuation.go | 52 ++- .../e2e/v1beta2/admission_controller.go | 7 +- .../e2e/v1beta2/autoscaling_utils.go | 88 ++--- vertical-pod-autoscaler/e2e/v1beta2/common.go | 23 +- vertical-pod-autoscaler/e2e/v1beta2/e2e.go | 312 +++++++++++++++++- .../e2e/v1beta2/e2e_test.go | 3 +- .../e2e/v1beta2/full_vpa.go | 7 +- .../e2e/v1beta2/recommender.go | 9 +- .../e2e/v1beta2/updater.go | 3 +- 19 files changed, 853 insertions(+), 204 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/utils/webhook.go b/vertical-pod-autoscaler/e2e/utils/webhook.go index d6fb65f9add8..760cf1632bb9 100644 --- a/vertical-pod-autoscaler/e2e/utils/webhook.go +++ b/vertical-pod-autoscaler/e2e/utils/webhook.go @@ -19,6 +19,7 @@ limitations under the License. package utils import ( + "context" "fmt" "strings" "time" @@ -55,32 +56,32 @@ func LabelNamespace(f *framework.Framework, namespace string) { client := f.ClientSet // Add a unique label to the namespace - ns, err := client.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}) + ns, err := client.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting namespace %s", namespace) if ns.Labels == nil { ns.Labels = map[string]string{} } ns.Labels[f.UniqueName] = "true" - _, err = client.CoreV1().Namespaces().Update(ns) + _, err = client.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) framework.ExpectNoError(err, "error labeling namespace %s", namespace) } // CreateWebhookConfigurationReadyNamespace creates a separate namespace for webhook configuration ready markers to // prevent cross-talk with webhook configurations being tested. func CreateWebhookConfigurationReadyNamespace(f *framework.Framework) { - ns, err := f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{ + ns, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: f.Namespace.Name + "-markers", Labels: map[string]string{f.UniqueName + "-markers": "true"}, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating namespace for webhook configuration ready markers") f.AddNamespacesToDelete(ns) } // RegisterMutatingWebhookForPod creates mutation webhook configuration // and applies it to the cluster. 
-func RegisterMutatingWebhookForPod(f *framework.Framework, configName string, context *certContext, servicePort int32) func() { +func RegisterMutatingWebhookForPod(f *framework.Framework, configName string, certContext *certContext, servicePort int32) func() { client := f.ClientSet ginkgo.By("Registering the mutating pod webhook via the AdmissionRegistration API") @@ -109,7 +110,7 @@ func RegisterMutatingWebhookForPod(f *framework.Framework, configName string, co Path: strPtr("/mutating-pods-sidecar"), Port: pointer.Int32Ptr(servicePort), }, - CABundle: context.signingCert, + CABundle: certContext.signingCert, }, SideEffects: &sideEffectsNone, AdmissionReviewVersions: []string{"v1", "v1beta1"}, @@ -119,7 +120,7 @@ func RegisterMutatingWebhookForPod(f *framework.Framework, configName string, co }, }, // Register a webhook that can be probed by marker requests to detect when the configuration is ready. - newMutatingIsReadyWebhookFixture(f, context, servicePort), + newMutatingIsReadyWebhookFixture(f, certContext, servicePort), }, }) framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace) @@ -128,7 +129,7 @@ func RegisterMutatingWebhookForPod(f *framework.Framework, configName string, co framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) + client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -144,12 +145,12 @@ func createMutatingWebhookConfiguration(f *framework.Framework, config *admissio } framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName) } - return f.ClientSet.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(config) + return f.ClientSet.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), config, metav1.CreateOptions{}) } // newMutatingIsReadyWebhookFixture creates a mutating webhook that can be added to a webhook configuration and then probed // with "marker" requests via waitWebhookConfigurationReady to wait for a webhook configuration to be ready. 
-func newMutatingIsReadyWebhookFixture(f *framework.Framework, context *certContext, servicePort int32) admissionregistrationv1beta1.MutatingWebhook { +func newMutatingIsReadyWebhookFixture(f *framework.Framework, certContext *certContext, servicePort int32) admissionregistrationv1beta1.MutatingWebhook { sideEffectsNone := admissionregistrationv1beta1.SideEffectClassNone failOpen := admissionregistrationv1beta1.Ignore return admissionregistrationv1beta1.MutatingWebhook{ @@ -169,7 +170,7 @@ func newMutatingIsReadyWebhookFixture(f *framework.Framework, context *certConte Path: strPtr("/always-deny"), Port: pointer.Int32Ptr(servicePort), }, - CABundle: context.signingCert, + CABundle: certContext.signingCert, }, // network failures while the service network routing is being set up should be ignored by the marker FailurePolicy: &failOpen, @@ -200,7 +201,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { }, }, } - _, err := cmClient.Create(marker) + _, err := cmClient.Create(context.TODO(), marker, metav1.CreateOptions{}) if err != nil { // The always-deny webhook does not provide a reason, so check for the error string we expect if strings.Contains(err.Error(), "denied") { @@ -209,7 +210,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { return false, err } // best effort cleanup of markers that are no longer needed - _ = cmClient.Delete(marker.GetName(), nil) + _ = cmClient.Delete(context.TODO(), marker.GetName(), metav1.DeleteOptions{}) framework.Logf("Waiting for webhook configuration to be ready...") return false, nil }) @@ -220,7 +221,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { func CreateAuthReaderRoleBinding(f *framework.Framework, namespace string) { ginkgo.By("Create role binding to let webhook read extension-apiserver-authentication") client := f.ClientSet - _, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{ + _, err := client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleBindingName, Annotations: map[string]string{ @@ -240,7 +241,7 @@ func CreateAuthReaderRoleBinding(f *framework.Framework, namespace string) { Namespace: namespace, }, }, - }) + }, metav1.CreateOptions{}) if err != nil && apierrors.IsAlreadyExists(err) { framework.Logf("role binding %s already exists", roleBindingName) } else { @@ -249,7 +250,7 @@ func CreateAuthReaderRoleBinding(f *framework.Framework, namespace string) { } // DeployWebhookAndService creates a webhook with a corresponding service. 
-func DeployWebhookAndService(f *framework.Framework, image string, context *certContext, servicePort int32, +func DeployWebhookAndService(f *framework.Framework, image string, certContext *certContext, servicePort int32, containerPort int32, params ...string) { ginkgo.By("Deploying the webhook pod") client := f.ClientSet @@ -261,12 +262,12 @@ func DeployWebhookAndService(f *framework.Framework, image string, context *cert }, Type: v1.SecretTypeOpaque, Data: map[string][]byte{ - "tls.crt": context.cert, - "tls.key": context.key, + "tls.crt": certContext.cert, + "tls.key": certContext.key, }, } namespace := f.Namespace.Name - _, err := client.CoreV1().Secrets(namespace).Create(secret) + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // Create the deployment of the webhook @@ -342,7 +343,7 @@ func DeployWebhookAndService(f *framework.Framework, image string, context *cert }, }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(d) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) ginkgo.By("Wait for the deployment to be ready") err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) @@ -370,7 +371,7 @@ func DeployWebhookAndService(f *framework.Framework, image string, context *cert }, }, } - _, err = client.CoreV1().Services(namespace).Create(service) + _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating service %s in namespace %s", WebhookServiceName, namespace) ginkgo.By("Verifying the service has paired with the endpoint") @@ -380,8 +381,8 @@ func DeployWebhookAndService(f *framework.Framework, image string, context *cert // CleanWebhookTest cleans after a webhook test. func CleanWebhookTest(client clientset.Interface, namespaceName string) { - _ = client.CoreV1().Services(namespaceName).Delete(WebhookServiceName, nil) - _ = client.AppsV1().Deployments(namespaceName).Delete(deploymentName, nil) - _ = client.CoreV1().Secrets(namespaceName).Delete(WebhookServiceName, nil) - _ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingName, nil) + _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), WebhookServiceName, metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{}) + _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), WebhookServiceName, metav1.DeleteOptions{}) + _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, metav1.DeleteOptions{}) } diff --git a/vertical-pod-autoscaler/e2e/v1/actuation.go b/vertical-pod-autoscaler/e2e/v1/actuation.go index 2c8edd27de91..cc5fa306cf8b 100644 --- a/vertical-pod-autoscaler/e2e/v1/actuation.go +++ b/vertical-pod-autoscaler/e2e/v1/actuation.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "fmt" "time" @@ -36,6 +37,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment" framework_job "k8s.io/kubernetes/test/e2e/framework/job" + framework_rc "k8s.io/kubernetes/test/e2e/framework/rc" framework_rs "k8s.io/kubernetes/test/e2e/framework/replicaset" framework_ss "k8s.io/kubernetes/test/e2e/framework/statefulset" testutils "k8s.io/kubernetes/test/utils" @@ -188,7 +190,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() { permissiveMaxUnavailable := 7 // Creating new PDB and removing old one, since PDBs are immutable at the moment setupPDB(f, "hamster-pdb-2", permissiveMaxUnavailable) - err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(pdb.Name, &metav1.DeleteOptions{}) + err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(context.TODO(), pdb.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, sleep for %s", VpaEvictionTimeout.String())) @@ -365,7 +367,7 @@ func getCPURequest(podSpec apiv1.PodSpec) resource.Quantity { } func killPod(f *framework.Framework, podList *apiv1.PodList) { - f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podList.Items[0].Name, &metav1.DeleteOptions{}) + f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podList.Items[0].Name, metav1.DeleteOptions{}) err := WaitForPodsRestarted(f, podList) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -468,8 +470,7 @@ func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) { hamsterContainer := SetupHamsterContainer(cpu, memory) - rc := framework.RcByNameContainer("hamster-rc", replicas, "k8s.gcr.io/ubuntu-slim:0.1", - hamsterLabels, hamsterContainer, nil) + rc := framework_rc.ByNameContainer("hamster-rc", replicas, hamsterLabels, hamsterContainer, nil) rc.Namespace = f.Namespace.Name err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc) @@ -509,8 +510,7 @@ func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32) } func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) { - rs := framework_rs.NewReplicaSet("hamster-rs", f.Namespace.Name, replicas, - hamsterLabels, "", "") + rs := newReplicaSet("hamster-rs", f.Namespace.Name, replicas, hamsterLabels, "", "") rs.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory) err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -541,7 +541,7 @@ func setupPDB(f *framework.Framework, name string, maxUnavailable int) *policyv1 }, }, } - _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(pdb) + _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(context.TODO(), pdb, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return pdb } @@ -557,7 +557,7 @@ func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a return fmt.Errorf("object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().ReplicaSets(namespace).Create(obj) + _, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrs.IsAlreadyExists(err) { return true, nil } @@ -574,7 +574,7 @@ func createStatefulSetSetWithRetries(c 
clientset.Interface, namespace string, ob return fmt.Errorf("object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().StatefulSets(namespace).Create(obj) + _, err := c.AppsV1().StatefulSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrs.IsAlreadyExists(err) { return true, nil } @@ -585,3 +585,37 @@ func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, ob } return testutils.RetryWithExponentialBackOff(createFunc) } + +// newReplicaSet returns a new ReplicaSet. +func newReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *appsv1.ReplicaSet { + return &appsv1.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: appsv1.ReplicaSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: podLabels, + }, + Replicas: &replicas, + Template: apiv1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + }, + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Name: imageName, + Image: image, + SecurityContext: &apiv1.SecurityContext{}, + }, + }, + }, + }, + }, + } +} diff --git a/vertical-pod-autoscaler/e2e/v1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1/admission_controller.go index 8885aa394e6f..9a59a69d0f69 100644 --- a/vertical-pod-autoscaler/e2e/v1/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1/admission_controller.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "fmt" "time" @@ -531,7 +532,7 @@ func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) zero := int32(0) deployment.Spec.Replicas = &zero c, ns := f.ClientSet, f.Namespace.Name - deployment, err := c.AppsV1().Deployments(ns).Create(deployment) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when creating deployment with size 0") err = framework_deployment.WaitForDeploymentComplete(c, deployment) @@ -555,12 +556,12 @@ func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) Replicas: desiredPodCount, }, } - afterScale, err := c.AppsV1().Deployments(ns).UpdateScale(deployment.Name, &scale) + afterScale, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deployment.Name, &scale, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(afterScale.Spec.Replicas).To(gomega.Equal(desiredPodCount), fmt.Sprintf("expected %d replicas after scaling", desiredPodCount)) // After scaling deployment we need to retrieve current version with updated replicas count. 
- deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when getting scaled deployment") err = framework_deployment.WaitForDeploymentComplete(c, deployment) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for deployment to resize") diff --git a/vertical-pod-autoscaler/e2e/v1/autoscaling_utils.go b/vertical-pod-autoscaler/e2e/v1/autoscaling_utils.go index 66cc2a5e05c8..1b632eeaf2bf 100644 --- a/vertical-pod-autoscaler/e2e/v1/autoscaling_utils.go +++ b/vertical-pod-autoscaler/e2e/v1/autoscaling_utils.go @@ -31,20 +31,20 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/replicaset" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2erc "k8s.io/kubernetes/test/e2e/framework/rc" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" + scaleclient "k8s.io/client-go/scale" imageutils "k8s.io/kubernetes/test/utils/image" ) const ( dynamicConsumptionTimeInSeconds = 30 - staticConsumptionTimeInSeconds = 3600 dynamicRequestSizeInMillicores = 20 dynamicRequestSizeInMegabytes = 100 dynamicRequestSizeCustomMetric = 10 @@ -65,18 +65,16 @@ const ( ) var ( - resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer) - resourceConsumerControllerImage = imageutils.GetE2EImage(imageutils.ResourceController) + resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer) ) var ( - // KindRC var + // KindRC is the GVK for ReplicationController KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"} - // KindDeployment var - KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"} - // KindReplicaSet var - KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"} - subresource = "scale" + // KindDeployment is the GVK for Deployment + KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"} + // KindReplicaSet is the GVK for ReplicaSet + KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"} ) /* @@ -108,28 +106,28 @@ type ResourceConsumer struct { requestSizeCustomMetric int } -// NewDynamicResourceConsumer func -func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuRequest, memRequest int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { +// NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer +func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, - dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuRequest, memRequest, clientset, scaleClient, nil, nil) + dynamicRequestSizeInMillicores, 
dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil) } /* -newResourceConsumer creates new ResourceConsumer +NewResourceConsumer creates new ResourceConsumer initCPUTotal argument is in millicores initMemoryTotal argument is in megabytes -memRequest argument is in megabytes, memRequest is a maximum amount of memory that can be consumed by a single pod -cpuRequest argument is in millicores, cpuRequest is a maximum amount of cpu that can be consumed by a single pod +memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod +cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod */ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, - requestSizeInMegabytes int, requestSizeCustomMetric int, cpuRequest, memRequest int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer { + requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer { if podAnnotations == nil { podAnnotations = make(map[string]string) } if serviceAnnotations == nil { serviceAnnotations = make(map[string]string) } - runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuRequest, memRequest, podAnnotations, serviceAnnotations) + runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations) rc := &ResourceConsumer{ name: name, controllerName: name + "-ctrl", @@ -249,14 +247,13 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). - Context(ctx). Name(rc.controllerName). Suffix("ConsumeCPU"). Param("millicores", strconv.Itoa(millicores)). Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores)) framework.Logf("ConsumeCPU URL: %v", *req.URL()) - _, err = req.DoRaw() + _, err = req.DoRaw(ctx) if err != nil { framework.Logf("ConsumeCPU failure: %v", err) return false, nil @@ -276,14 +273,13 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). - Context(ctx). Name(rc.controllerName). Suffix("ConsumeMem"). Param("megabytes", strconv.Itoa(megabytes)). Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). 
Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes)) framework.Logf("ConsumeMem URL: %v", *req.URL()) - _, err = req.DoRaw() + _, err = req.DoRaw(ctx) if err != nil { framework.Logf("ConsumeMem failure: %v", err) return false, nil @@ -303,7 +299,6 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). - Context(ctx). Name(rc.controllerName). Suffix("BumpMetric"). Param("metric", customMetricName). @@ -311,7 +306,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric)) framework.Logf("ConsumeCustomMetric URL: %v", *req.URL()) - _, err = req.DoRaw() + _, err = req.DoRaw(ctx) if err != nil { framework.Logf("ConsumeCustomMetric failure: %v", err) return false, nil @@ -321,7 +316,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { framework.ExpectNoError(err) } -// CleanUp func +// CleanUp clean up the background goroutines responsible for consuming resources. func (rc *ResourceConsumer) CleanUp() { ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name)) close(rc.stopCPU) @@ -332,14 +327,14 @@ func (rc *ResourceConsumer) CleanUp() { time.Sleep(10 * time.Second) kind := rc.kind.GroupKind() framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil)) - framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, metav1.DeleteOptions{})) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, metav1.DeleteOptions{})) } func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuRequestMillis, memRequestMb int64, podAnnotations, serviceAnnotations map[string]string) { ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) - _, err := c.CoreV1().Services(ns).Create(&v1.Service{ + _, err := c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Annotations: serviceAnnotations, @@ -354,7 +349,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st "name": name, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) rcConfig := testutils.RCConfig{ @@ -365,37 +360,34 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st Timeout: timeoutRC, Replicas: replicas, CpuRequest: cpuRequestMillis, - MemRequest: memRequestMb * 1024 * 1024, // Mem Request is in bytes + MemRequest: memRequestMb * 1024 * 1024, // MemRequest is in bytes Annotations: podAnnotations, } switch kind { case KindRC: - framework.ExpectNoError(framework.RunRC(rcConfig)) - break + 
framework.ExpectNoError(e2erc.RunRC(rcConfig)) case KindDeployment: dpConfig := testutils.DeploymentConfig{ RCConfig: rcConfig, } ginkgo.By(fmt.Sprintf("creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo - dpConfig.ContainerDumpFunc = framework.LogFailedContainers + dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers framework.ExpectNoError(testutils.RunDeployment(dpConfig)) - break case KindReplicaSet: rsConfig := testutils.ReplicaSetConfig{ RCConfig: rcConfig, } ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace)) - framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig)) - break + framework.ExpectNoError(runReplicaSet(rsConfig)) default: framework.Failf(invalidKind) } ginkgo.By(fmt.Sprintf("Running controller")) controllerName := name + "-ctrl" - _, err = c.CoreV1().Services(ns).Create(&v1.Service{ + _, err = c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: controllerName, }, @@ -409,27 +401,35 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st "name": controllerName, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) dnsClusterFirst := v1.DNSClusterFirst controllerRcConfig := testutils.RCConfig{ Client: c, - Image: resourceConsumerControllerImage, + Image: imageutils.GetE2EImage(imageutils.Agnhost), Name: controllerName, Namespace: ns, Timeout: timeoutRC, Replicas: 1, - Command: []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"}, + Command: []string{"/agnhost", "resource-consumer-controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"}, DNSPolicy: &dnsClusterFirst, } - framework.ExpectNoError(framework.RunRC(controllerRcConfig)) + framework.ExpectNoError(e2erc.RunRC(controllerRcConfig)) // Wait for endpoints to propagate for the controller service. framework.ExpectNoError(framework.WaitForServiceEndpointsNum( c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) } +// runReplicaSet launches (and verifies correctness) of a replicaset. +func runReplicaSet(config testutils.ReplicaSetConfig) error { + ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace)) + config.NodeDumpFunc = framework.DumpNodeDebugInfo + config.ContainerDumpFunc = e2ekubectl.LogFailedContainers + return testutils.RunReplicaSet(config) +} + func runOomingReplicationController(c clientset.Interface, ns, name string, replicas int) { ginkgo.By(fmt.Sprintf("Running OOMing RC %s with %v replicas", name, replicas)) @@ -450,6 +450,6 @@ func runOomingReplicationController(c clientset.Interface, ns, name string, repl } ginkgo.By(fmt.Sprintf("Creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo - dpConfig.ContainerDumpFunc = framework.LogFailedContainers + dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers framework.ExpectNoError(testutils.RunDeployment(dpConfig)) } diff --git a/vertical-pod-autoscaler/e2e/v1/common.go b/vertical-pod-autoscaler/e2e/v1/common.go index ff3b3be125e6..be64831cf25c 100644 --- a/vertical-pod-autoscaler/e2e/v1/common.go +++ b/vertical-pod-autoscaler/e2e/v1/common.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "encoding/json" "fmt" "time" @@ -121,7 +122,7 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity) d.Spec.Replicas = &replicas - d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d) + d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), d, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation") err = framework_deployment.WaitForDeploymentComplete(f.ClientSet, d) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish") @@ -203,7 +204,7 @@ func getPodSelectorExcludingDonePodsOrDie() string { func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: getPodSelectorExcludingDonePodsOrDie()} - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(options) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), options) } // NewTestCronJob returns a CronJob for test purposes. @@ -252,11 +253,11 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int } func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) { - return c.BatchV1beta1().CronJobs(ns).Create(cronJob) + return c.BatchV1beta1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{}) } func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) { - return c.BatchV1beta1().CronJobs(ns).Get(name, metav1.GetOptions{}) + return c.BatchV1beta1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) } // SetupHamsterCronJob creates and sets up a new CronJob @@ -358,7 +359,7 @@ func getVpaClientSet(f *framework.Framework) vpa_clientset.Interface { // InstallVPA installs a VPA object in the test cluster. func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) { vpaClientSet := getVpaClientSet(f) - _, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Create(vpa) + _, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Create(context.TODO(), vpa, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA") } @@ -369,7 +370,7 @@ func InstallRawVPA(f *framework.Framework, obj interface{}) error { Namespace(f.Namespace.Name). Resource("verticalpodautoscalers"). Body(obj). 
- Do() + Do(context.TODO()) return err.Error() } @@ -384,7 +385,7 @@ func PatchVpaRecommendation(f *framework.Framework, vpa *vpa_types.VerticalPodAu Value: *newStatus, }}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = getVpaClientSet(f).AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Patch(vpa.Name, types.JSONPatchType, bytes) + _, err = getVpaClientSet(f).AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Patch(context.TODO(), vpa.Name, types.JSONPatchType, bytes, metav1.PatchOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch VPA.") } @@ -395,7 +396,7 @@ func AnnotatePod(f *framework.Framework, podName, annotationName, annotationValu Path: fmt.Sprintf("/metadata/annotations/%v", annotationName), Value: annotationValue, }}) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Patch(podName, types.JSONPatchType, bytes) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Patch(context.TODO(), podName, types.JSONPatchType, bytes, metav1.PatchOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch pod.") gomega.Expect(pod.Annotations[annotationName]).To(gomega.Equal(annotationValue)) } @@ -507,7 +508,7 @@ func WaitForVPAMatch(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutosc var polledVpa *vpa_types.VerticalPodAutoscaler err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - polledVpa, err = c.AutoscalingV1().VerticalPodAutoscalers(vpa.Namespace).Get(vpa.Name, metav1.GetOptions{}) + polledVpa, err = c.AutoscalingV1().VerticalPodAutoscalers(vpa.Namespace).Get(context.TODO(), vpa.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -583,7 +584,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC } lr.Spec.Limits = append(lr.Spec.Limits, lrItem) } - _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr) + _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(context.TODO(), lr, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range") } diff --git a/vertical-pod-autoscaler/e2e/v1/e2e.go b/vertical-pod-autoscaler/e2e/v1/e2e.go index 44dfb6242871..792baa4c89c9 100644 --- a/vertical-pod-autoscaler/e2e/v1/e2e.go +++ b/vertical-pod-autoscaler/e2e/v1/e2e.go @@ -19,45 +19,66 @@ package autoscaling // This file is a cut down fork of k8s/test/e2e/e2e.go import ( + "context" + "fmt" + "io/ioutil" "os" "path" "testing" + "time" + + "k8s.io/klog" "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/reporters" "github.com/onsi/gomega" - "k8s.io/klog" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeutils "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/component-base/logs" + "k8s.io/component-base/version" "k8s.io/kubernetes/test/e2e/framework" - e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/manifest" + e2ereporters "k8s.io/kubernetes/test/e2e/reporters" + testutils "k8s.io/kubernetes/test/utils" + utilnet "k8s.io/utils/net" + clientset "k8s.io/client-go/kubernetes" // ensure auth plugins are loaded _ "k8s.io/client-go/plugin/pkg/client/auth" // ensure that cloud 
providers are loaded - _ "k8s.io/kubernetes/test/e2e/framework/providers/aws" - _ "k8s.io/kubernetes/test/e2e/framework/providers/azure" + _ "k8s.io/kubernetes/test/e2e/framework/providers/gce" - _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark" - _ "k8s.io/kubernetes/test/e2e/framework/providers/openstack" - _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere" +) + +const ( + // namespaceCleanupTimeout is how long to wait for the namespace to be deleted. + // If there are any orphaned namespaces to clean up, this test is running + // on a long lived cluster. A long wait here is preferably to spurious test + // failures caused by leaked resources from a previous test run. + namespaceCleanupTimeout = 15 * time.Minute ) var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { - framework.SetupSuite() + setupSuite() return nil }, func(data []byte) { // Run on all Ginkgo nodes - framework.SetupSuitePerGinkgoNode() + setupSuitePerGinkgoNode() }) var _ = ginkgo.SynchronizedAfterSuite(func() { - framework.CleanupSuite() + CleanupSuite() }, func() { - framework.AfterSuiteActions() + AfterSuiteActions() }) // RunE2ETests checks configuration parameters (specified through flags) and then runs @@ -70,7 +91,7 @@ func RunE2ETests(t *testing.T) { logs.InitLogs() defer logs.FlushLogs() - gomega.RegisterFailHandler(e2elog.Fail) + gomega.RegisterFailHandler(framework.Fail) // Disable skipped tests unless they are explicitly requested. if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" { config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]` @@ -87,7 +108,272 @@ func RunE2ETests(t *testing.T) { r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, "junit_03.xml"))) } } - klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode) + // Stream the progress to stdout and optionally a URL accepting progress updates. + r = append(r, e2ereporters.NewProgressReporter(framework.TestContext.ProgressReportURL)) + + // The DetailsRepoerter will output details about every test (name, files, lines, etc) which helps + // when documenting our tests. + if len(framework.TestContext.SpecSummaryOutput) > 0 { + r = append(r, e2ereporters.NewDetailsReporterFile(framework.TestContext.SpecSummaryOutput)) + } + + klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode) ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r) } + +// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it +// to flip to Ready, log its output and delete it. 
+func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { + path := "test/images/clusterapi-tester/pod.yaml" + framework.Logf("Parsing pod from %v", path) + p, err := manifest.PodFromManifest(path) + if err != nil { + framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err) + return + } + p.Namespace = ns + if _, err := c.CoreV1().Pods(ns).Create(context.TODO(), p, metav1.CreateOptions{}); err != nil { + framework.Logf("Failed to create %v: %v", p.Name, err) + return + } + defer func() { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}); err != nil { + framework.Logf("Failed to delete pod %v: %v", p.Name, err) + } + }() + timeout := 5 * time.Minute + if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil { + framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) + return + } + logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) + if err != nil { + framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err) + } else { + framework.Logf("Output of clusterapi-tester:\n%v", logs) + } +} + +// getDefaultClusterIPFamily obtains the default IP family of the cluster +// using the Cluster IP address of the kubernetes service created in the default namespace +// This unequivocally identifies the default IP family because services are single family +// TODO: dual-stack may support multiple families per service +// but we can detect if a cluster is dual stack because pods have two addresses (one per family) +func getDefaultClusterIPFamily(c clientset.Interface) string { + // Get the ClusterIP of the kubernetes service created in the default namespace + svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + if err != nil { + framework.Failf("Failed to get kubernetes service ClusterIP: %v", err) + } + + if utilnet.IsIPv6String(svc.Spec.ClusterIP) { + return "ipv6" + } + return "ipv4" +} + +// waitForDaemonSets for all daemonsets in the given namespace to be ready +// (defined as all but 'allowedNotReadyNodes' pods associated with that +// daemonset are ready). 
+func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error { + start := time.Now() + framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", + timeout, ns) + + return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { + dsList, err := c.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + var notReadyDaemonSets []string + for _, ds := range dsList.Items { + framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds())) + if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes { + notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name) + } + } + + if len(notReadyDaemonSets) > 0 { + framework.Logf("there are not ready daemonsets: %v", notReadyDaemonSets) + return false, nil + } + + return true, nil + }) +} + +// setupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step. +// There are certain operations we only want to run once per overall test invocation +// (such as deleting old namespaces, or verifying that all system pods are running. +// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite +// to ensure that these operations only run on the first parallel Ginkgo node. +// +// This function takes two parameters: one function which runs on only the first Ginkgo node, +// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes, +// accepting the byte array. +func setupSuite() { + // Run only on Ginkgo node 1 + + switch framework.TestContext.Provider { + case "gce", "gke": + framework.LogClusterImageSources() + } + + c, err := framework.LoadClientset() + if err != nil { + klog.Fatal("Error loading client: ", err) + } + + // Delete any namespaces except those created by the system. This ensures no + // lingering resources are left over from a previous test run. + if framework.TestContext.CleanStart { + deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */ + []string{ + metav1.NamespaceSystem, + metav1.NamespaceDefault, + metav1.NamespacePublic, + v1.NamespaceNodeLease, + }) + if err != nil { + framework.Failf("Error deleting orphaned namespaces: %v", err) + } + klog.Infof("Waiting for deletion of the following namespaces: %v", deleted) + if err := framework.WaitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil { + framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) + } + } + + // In large clusters we may get to this point but still have a bunch + // of nodes without Routes created. Since this would make a node + // unschedulable, we need to wait until all of them are schedulable. 
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + + // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted + if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes { + nodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) + framework.TestContext.CloudConfig.NumNodes = len(nodes.Items) + } + + // Ensure all pods are running and ready before starting tests (otherwise, + // cluster infrastructure pods that are being pulled or started can block + // test pods from running, and tests that ensure all pods are running and + // ready will fail). + podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout + // TODO: In large clusters, we often observe a non-starting pods due to + // #41007. To avoid those pods preventing the whole test runs (and just + // wasting the whole run), we allow for some not-ready pods (with the + // number equal to the number of allowed not-ready nodes). + if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { + framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) + e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf) + runKubernetesServiceTestContainer(c, metav1.NamespaceDefault) + framework.Failf("Error waiting for all pods to be running and ready: %v", err) + } + + if err := waitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil { + framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err) + } + + // Log the version of the server and this client. + framework.Logf("e2e test version: %s", version.Get().GitVersion) + + dc := c.DiscoveryClient + + serverVersion, serverErr := dc.ServerVersion() + if serverErr != nil { + framework.Logf("Unexpected server error retrieving version: %v", serverErr) + } + if serverVersion != nil { + framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion) + } + + if framework.TestContext.NodeKiller.Enabled { + nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider) + go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh) + } +} + +// setupSuitePerGinkgoNode is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step. 
+// There are certain operations we only want to run once per overall test invocation on each Ginkgo node +// such as making some global variables accessible to all parallel executions +// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite +// Ref: https://onsi.github.io/ginkgo/#parallel-specs +func setupSuitePerGinkgoNode() { + // Obtain the default IP family of the cluster + // Some e2e test are designed to work on IPv4 only, this global variable + // allows to adapt those tests to work on both IPv4 and IPv6 + // TODO: dual-stack + // the dual stack clusters can be ipv4-ipv6 or ipv6-ipv4, order matters, + // and services use the primary IP family by default + c, err := framework.LoadClientset() + if err != nil { + klog.Fatal("Error loading client: ", err) + } + framework.TestContext.IPFamily = getDefaultClusterIPFamily(c) + framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily) +} + +// CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step. +// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs). +// Here, the order of functions is reversed; first, the function which runs everywhere, +// and then the function that only runs on the first Ginkgo node. +func CleanupSuite() { + // Run on all Ginkgo nodes + framework.Logf("Running AfterSuite actions on all nodes") + framework.RunCleanupActions() +} + +// AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite +func AfterSuiteActions() { + // Run only Ginkgo on node 1 + framework.Logf("Running AfterSuite actions on node 1") + if framework.TestContext.ReportDir != "" { + framework.CoreDump(framework.TestContext.ReportDir) + } + if framework.TestContext.GatherSuiteMetricsAfterTest { + if err := gatherTestSuiteMetrics(); err != nil { + framework.Logf("Error gathering metrics: %v", err) + } + } + if framework.TestContext.NodeKiller.Enabled { + close(framework.TestContext.NodeKiller.NodeKillerStopCh) + } +} + +func gatherTestSuiteMetrics() error { + framework.Logf("Gathering metrics") + c, err := framework.LoadClientset() + if err != nil { + return fmt.Errorf("error loading client: %v", err) + } + + // Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally). 
+ grabber, err := e2emetrics.NewMetricsGrabber(c, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics) + if err != nil { + return fmt.Errorf("failed to create MetricsGrabber: %v", err) + } + + received, err := grabber.Grab() + if err != nil { + return fmt.Errorf("failed to grab metrics: %v", err) + } + + metricsForE2E := (*e2emetrics.ComponentCollection)(&received) + metricsJSON := metricsForE2E.PrintJSON() + if framework.TestContext.ReportDir != "" { + filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json") + if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil { + return fmt.Errorf("error writing to %q: %v", filePath, err) + } + } else { + framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON) + } + + return nil +} diff --git a/vertical-pod-autoscaler/e2e/v1/e2e_test.go b/vertical-pod-autoscaler/e2e/v1/e2e_test.go index e71b9098ce14..084887e93406 100644 --- a/vertical-pod-autoscaler/e2e/v1/e2e_test.go +++ b/vertical-pod-autoscaler/e2e/v1/e2e_test.go @@ -33,7 +33,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" "k8s.io/kubernetes/test/e2e/framework/testfiles" - "k8s.io/kubernetes/test/e2e/framework/viperconfig" "k8s.io/kubernetes/test/e2e/generated" "k8s.io/kubernetes/test/utils/image" ) @@ -55,7 +54,7 @@ func TestMain(m *testing.M) { // Now that we know which Viper config (if any) was chosen, // parse it and update those options which weren't already set via command line flags // (which have higher priority). - if err := viperconfig.ViperizeFlags(*viperConfig, "e2e", flag.CommandLine); err != nil { + if err := viperizeFlags(*viperConfig, "e2e", flag.CommandLine); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } diff --git a/vertical-pod-autoscaler/e2e/v1/full_vpa.go b/vertical-pod-autoscaler/e2e/v1/full_vpa.go index 179627c45a0f..103afef89428 100644 --- a/vertical-pod-autoscaler/e2e/v1/full_vpa.go +++ b/vertical-pod-autoscaler/e2e/v1/full_vpa.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "fmt" "time" @@ -85,7 +86,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() { vpaClientSet = vpa_clientset.NewForConfigOrDie(config) vpaClient := vpaClientSet.AutoscalingV1() - _, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD) + _, err = vpaClient.VerticalPodAutoscalers(ns).Create(context.TODO(), vpaCRD, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -152,7 +153,7 @@ var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() { vpaClientSet = vpa_clientset.NewForConfigOrDie(config) vpaClient := vpaClientSet.AutoscalingV1() - _, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD) + _, err = vpaClient.VerticalPodAutoscalers(ns).Create(context.TODO(), vpaCRD, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -174,7 +175,7 @@ func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions ns := f.Namespace.Name c := f.ClientSet - podList, err := c.CoreV1().Pods(ns).List(listOptions) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), listOptions) if err != nil { return false, err } diff --git a/vertical-pod-autoscaler/e2e/v1/recommender.go b/vertical-pod-autoscaler/e2e/v1/recommender.go index ac47bc5a65cd..bdfd28d40ac1 100644 --- a/vertical-pod-autoscaler/e2e/v1/recommender.go +++ b/vertical-pod-autoscaler/e2e/v1/recommender.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "fmt" "strings" "time" @@ -133,12 +134,12 @@ var _ = RecommenderE2eDescribe("Checkpoints", func() { }, } - _, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalerCheckpoints(ns).Create(&checkpoint) + _, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalerCheckpoints(ns).Create(context.TODO(), &checkpoint, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) time.Sleep(15 * time.Minute) - list, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalerCheckpoints(ns).List(metav1.ListOptions{}) + list, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalerCheckpoints(ns).List(context.TODO(), metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(list.Items).To(gomega.BeEmpty()) }) @@ -378,7 +379,7 @@ func createVpaCRDWithContainerScalingModes(f *framework.Framework, modes ...vpa_ func deleteRecommender(c clientset.Interface) error { namespace := "kube-system" listOptions := metav1.ListOptions{} - podList, err := c.CoreV1().Pods(namespace).List(listOptions) + podList, err := c.CoreV1().Pods(namespace).List(context.TODO(), listOptions) if err != nil { fmt.Println("Could not list pods.", err) return err @@ -387,7 +388,7 @@ func deleteRecommender(c clientset.Interface) error { for _, pod := range podList.Items { if strings.HasPrefix(pod.Name, "vpa-recommender") { fmt.Print("Deleting pod.", namespace, pod.Name) - err := c.CoreV1().Pods(namespace).Delete(pod.Name, &metav1.DeleteOptions{}) + err := c.CoreV1().Pods(namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) if err != nil { return err } diff --git a/vertical-pod-autoscaler/e2e/v1/updater.go b/vertical-pod-autoscaler/e2e/v1/updater.go index 9329ba3229df..4aaeae1a85b6 100644 --- a/vertical-pod-autoscaler/e2e/v1/updater.go +++ b/vertical-pod-autoscaler/e2e/v1/updater.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "fmt" "time" @@ -52,7 +53,7 @@ var _ = UpdaterE2eDescribe("Updater", func() { ginkgo.By("Deleting the Admission Controller status") close(stopCh) err := f.ClientSet.CoordinationV1().Leases(status.AdmissionControllerStatusNamespace). - Delete(status.AdmissionControllerStatusName, &metav1.DeleteOptions{}) + Delete(context.TODO(), status.AdmissionControllerStatusName, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() statusUpdater.Run(stopCh) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/actuation.go b/vertical-pod-autoscaler/e2e/v1beta2/actuation.go index efeb2dcfede9..9e53abbf0511 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/actuation.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/actuation.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "fmt" "time" @@ -36,6 +37,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment" framework_job "k8s.io/kubernetes/test/e2e/framework/job" + framework_rc "k8s.io/kubernetes/test/e2e/framework/rc" framework_rs "k8s.io/kubernetes/test/e2e/framework/replicaset" framework_ss "k8s.io/kubernetes/test/e2e/framework/statefulset" testutils "k8s.io/kubernetes/test/utils" @@ -188,7 +190,7 @@ var _ = ActuationSuiteE2eDescribe("Actuation", func() { permissiveMaxUnavailable := 7 // Creating new PDB and removing old one, since PDBs are immutable at the moment setupPDB(f, "hamster-pdb-2", permissiveMaxUnavailable) - err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(pdb.Name, &metav1.DeleteOptions{}) + err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(context.TODO(), pdb.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, sleep for %s", VpaEvictionTimeout.String())) @@ -365,7 +367,7 @@ func getCPURequest(podSpec apiv1.PodSpec) resource.Quantity { } func killPod(f *framework.Framework, podList *apiv1.PodList) { - f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podList.Items[0].Name, &metav1.DeleteOptions{}) + f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podList.Items[0].Name, metav1.DeleteOptions{}) err := WaitForPodsRestarted(f, podList) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -468,8 +470,7 @@ func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) { hamsterContainer := SetupHamsterContainer(cpu, memory) - rc := framework.RcByNameContainer("hamster-rc", replicas, "k8s.gcr.io/ubuntu-slim:0.1", - hamsterLabels, hamsterContainer, nil) + rc := framework_rc.ByNameContainer("hamster-rc", replicas, hamsterLabels, hamsterContainer, nil) rc.Namespace = f.Namespace.Name err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc) @@ -509,8 +510,7 @@ func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32) } func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) { - rs := framework_rs.NewReplicaSet("hamster-rs", f.Namespace.Name, replicas, - hamsterLabels, "", "") + rs := newReplicaSet("hamster-rs", f.Namespace.Name, replicas, hamsterLabels, "", "") rs.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory) err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -541,7 +541,7 @@ func setupPDB(f *framework.Framework, 
name string, maxUnavailable int) *policyv1 }, }, } - _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(pdb) + _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(context.TODO(), pdb, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return pdb } @@ -557,7 +557,7 @@ func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a return fmt.Errorf("object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().ReplicaSets(namespace).Create(obj) + _, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrs.IsAlreadyExists(err) { return true, nil } @@ -574,7 +574,7 @@ func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, ob return fmt.Errorf("object provided to create is empty") } createFunc := func() (bool, error) { - _, err := c.AppsV1().StatefulSets(namespace).Create(obj) + _, err := c.AppsV1().StatefulSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) if err == nil || apierrs.IsAlreadyExists(err) { return true, nil } @@ -585,3 +585,37 @@ func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, ob } return testutils.RetryWithExponentialBackOff(createFunc) } + +// newReplicaSet returns a new ReplicaSet. +func newReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *appsv1.ReplicaSet { + return &appsv1.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: appsv1.ReplicaSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: podLabels, + }, + Replicas: &replicas, + Template: apiv1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + }, + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Name: imageName, + Image: image, + SecurityContext: &apiv1.SecurityContext{}, + }, + }, + }, + }, + }, + } +} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index dbb8d00dafd7..7a928c99d9ad 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "fmt" "time" @@ -530,7 +531,7 @@ func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) zero := int32(0) deployment.Spec.Replicas = &zero c, ns := f.ClientSet, f.Namespace.Name - deployment, err := c.AppsV1().Deployments(ns).Create(deployment) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when creating deployment with size 0") err = framework_deployment.WaitForDeploymentComplete(c, deployment) @@ -554,12 +555,12 @@ func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) Replicas: desiredPodCount, }, } - afterScale, err := c.AppsV1().Deployments(ns).UpdateScale(deployment.Name, &scale) + afterScale, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deployment.Name, &scale, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(afterScale.Spec.Replicas).To(gomega.Equal(desiredPodCount), fmt.Sprintf("expected %d replicas after scaling", desiredPodCount)) // After scaling deployment we need to retrieve current version with updated replicas count. - deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when getting scaled deployment") err = framework_deployment.WaitForDeploymentComplete(c, deployment) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for deployment to resize") diff --git a/vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go b/vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go index 230ab8ba5008..1b632eeaf2bf 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go @@ -31,20 +31,20 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/replicaset" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2erc "k8s.io/kubernetes/test/e2e/framework/rc" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" + scaleclient "k8s.io/client-go/scale" imageutils "k8s.io/kubernetes/test/utils/image" ) const ( dynamicConsumptionTimeInSeconds = 30 - staticConsumptionTimeInSeconds = 3600 dynamicRequestSizeInMillicores = 20 dynamicRequestSizeInMegabytes = 100 dynamicRequestSizeCustomMetric = 10 @@ -65,18 +65,16 @@ const ( ) var ( - resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer) - resourceConsumerControllerImage = imageutils.GetE2EImage(imageutils.ResourceController) + resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer) ) var ( - // KindRC var + // KindRC is the GVK for ReplicationController KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"} - // KindDeployment var - KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"} - // KindReplicaSet var - KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"} - subresource = "scale" + // KindDeployment is the GVK for Deployment + KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: 
"Deployment"} + // KindReplicaSet is the GVK for ReplicaSet + KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"} ) /* @@ -108,28 +106,28 @@ type ResourceConsumer struct { requestSizeCustomMetric int } -// NewDynamicResourceConsumer func -func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuRequest, memRequest int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { +// NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer +func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, - dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuRequest, memRequest, clientset, scaleClient, nil, nil) + dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil) } /* -newResourceConsumer creates new ResourceConsumer +NewResourceConsumer creates new ResourceConsumer initCPUTotal argument is in millicores initMemoryTotal argument is in megabytes -memRequest argument is in megabytes, memRequest is a maximum amount of memory that can be consumed by a single pod -cpuRequest argument is in millicores, cpuRequest is a maximum amount of cpu that can be consumed by a single pod +memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod +cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod */ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, - requestSizeInMegabytes int, requestSizeCustomMetric int, cpuRequest, memRequest int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer { + requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer { if podAnnotations == nil { podAnnotations = make(map[string]string) } if serviceAnnotations == nil { serviceAnnotations = make(map[string]string) } - runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuRequest, memRequest, podAnnotations, serviceAnnotations) + runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations) rc := &ResourceConsumer{ name: name, controllerName: name + "-ctrl", @@ -249,14 +247,13 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). - Context(ctx). Name(rc.controllerName). Suffix("ConsumeCPU"). Param("millicores", strconv.Itoa(millicores)). 
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores)) framework.Logf("ConsumeCPU URL: %v", *req.URL()) - _, err = req.DoRaw() + _, err = req.DoRaw(ctx) if err != nil { framework.Logf("ConsumeCPU failure: %v", err) return false, nil @@ -276,14 +273,13 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). - Context(ctx). Name(rc.controllerName). Suffix("ConsumeMem"). Param("megabytes", strconv.Itoa(megabytes)). Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes)) framework.Logf("ConsumeMem URL: %v", *req.URL()) - _, err = req.DoRaw() + _, err = req.DoRaw(ctx) if err != nil { framework.Logf("ConsumeMem failure: %v", err) return false, nil @@ -303,7 +299,6 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). - Context(ctx). Name(rc.controllerName). Suffix("BumpMetric"). Param("metric", customMetricName). @@ -311,7 +306,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric)) framework.Logf("ConsumeCustomMetric URL: %v", *req.URL()) - _, err = req.DoRaw() + _, err = req.DoRaw(ctx) if err != nil { framework.Logf("ConsumeCustomMetric failure: %v", err) return false, nil @@ -321,7 +316,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { framework.ExpectNoError(err) } -// CleanUp func +// CleanUp clean up the background goroutines responsible for consuming resources. 
func (rc *ResourceConsumer) CleanUp() { ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name)) close(rc.stopCPU) @@ -332,14 +327,14 @@ func (rc *ResourceConsumer) CleanUp() { time.Sleep(10 * time.Second) kind := rc.kind.GroupKind() framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil)) - framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, metav1.DeleteOptions{})) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, metav1.DeleteOptions{})) } func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuRequestMillis, memRequestMb int64, podAnnotations, serviceAnnotations map[string]string) { ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) - _, err := c.CoreV1().Services(ns).Create(&v1.Service{ + _, err := c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Annotations: serviceAnnotations, @@ -354,7 +349,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st "name": name, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) rcConfig := testutils.RCConfig{ @@ -365,37 +360,34 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st Timeout: timeoutRC, Replicas: replicas, CpuRequest: cpuRequestMillis, - MemRequest: memRequestMb * 1024 * 1024, // MemLimit is in bytes + MemRequest: memRequestMb * 1024 * 1024, // MemRequest is in bytes Annotations: podAnnotations, } switch kind { case KindRC: - framework.ExpectNoError(framework.RunRC(rcConfig)) - break + framework.ExpectNoError(e2erc.RunRC(rcConfig)) case KindDeployment: dpConfig := testutils.DeploymentConfig{ RCConfig: rcConfig, } ginkgo.By(fmt.Sprintf("creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo - dpConfig.ContainerDumpFunc = framework.LogFailedContainers + dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers framework.ExpectNoError(testutils.RunDeployment(dpConfig)) - break case KindReplicaSet: rsConfig := testutils.ReplicaSetConfig{ RCConfig: rcConfig, } ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace)) - framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig)) - break + framework.ExpectNoError(runReplicaSet(rsConfig)) default: framework.Failf(invalidKind) } ginkgo.By(fmt.Sprintf("Running controller")) controllerName := name + "-ctrl" - _, err = c.CoreV1().Services(ns).Create(&v1.Service{ + _, err = c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: controllerName, }, @@ -409,27 +401,35 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st "name": controllerName, }, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err) dnsClusterFirst := 
v1.DNSClusterFirst controllerRcConfig := testutils.RCConfig{ Client: c, - Image: resourceConsumerControllerImage, + Image: imageutils.GetE2EImage(imageutils.Agnhost), Name: controllerName, Namespace: ns, Timeout: timeoutRC, Replicas: 1, - Command: []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"}, + Command: []string{"/agnhost", "resource-consumer-controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"}, DNSPolicy: &dnsClusterFirst, } - framework.ExpectNoError(framework.RunRC(controllerRcConfig)) + framework.ExpectNoError(e2erc.RunRC(controllerRcConfig)) // Wait for endpoints to propagate for the controller service. framework.ExpectNoError(framework.WaitForServiceEndpointsNum( c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) } +// runReplicaSet launches (and verifies correctness) of a replicaset. +func runReplicaSet(config testutils.ReplicaSetConfig) error { + ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace)) + config.NodeDumpFunc = framework.DumpNodeDebugInfo + config.ContainerDumpFunc = e2ekubectl.LogFailedContainers + return testutils.RunReplicaSet(config) +} + func runOomingReplicationController(c clientset.Interface, ns, name string, replicas int) { ginkgo.By(fmt.Sprintf("Running OOMing RC %s with %v replicas", name, replicas)) @@ -450,6 +450,6 @@ func runOomingReplicationController(c clientset.Interface, ns, name string, repl } ginkgo.By(fmt.Sprintf("Creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo - dpConfig.ContainerDumpFunc = framework.LogFailedContainers + dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers framework.ExpectNoError(testutils.RunDeployment(dpConfig)) } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index 8ad02f16f366..2f957499f1c4 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "encoding/json" "fmt" "time" @@ -121,7 +122,7 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity) d.Spec.Replicas = &replicas - d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d) + d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), d, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation") err = framework_deployment.WaitForDeploymentComplete(f.ClientSet, d) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish") @@ -203,7 +204,7 @@ func getPodSelectorExcludingDonePodsOrDie() string { func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: getPodSelectorExcludingDonePodsOrDie()} - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(options) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), options) } // NewTestCronJob returns a CronJob for test purposes. 
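Most hunks in this patch apply one mechanical change: after the e2e dependency update, typed client-go calls take a leading context.Context plus an explicit options struct (metav1.CreateOptions{}, metav1.ListOptions{}, metav1.GetOptions{}, metav1.UpdateOptions{}, metav1.DeleteOptions{}). The fragment below is a minimal sketch of that calling convention, not code from this patch; listAndInspectPods, the namespace and the selector are illustrative placeholders.

package autoscaling

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listAndInspectPods is a hypothetical helper (not part of this patch) showing
// the post-update client-go calling convention used throughout these hunks:
// a leading context.Context plus an explicit options struct on every call.
func listAndInspectPods(c clientset.Interface, ns, selector string) error {
	pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		// Get follows the same shape; Delete additionally switches from
		// *metav1.DeleteOptions to a metav1.DeleteOptions value.
		if _, err := c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil {
			return err
		}
	}
	return nil
}

The tests pass context.TODO() rather than a request-scoped context, which keeps the diff mechanical; callers that need cancellation or timeouts can substitute their own context.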
@@ -252,11 +253,11 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int } func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) { - return c.BatchV1beta1().CronJobs(ns).Create(cronJob) + return c.BatchV1beta1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{}) } func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) { - return c.BatchV1beta1().CronJobs(ns).Get(name, metav1.GetOptions{}) + return c.BatchV1beta1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) } // SetupHamsterCronJob creates and sets up a new CronJob @@ -358,18 +359,18 @@ func getVpaClientSet(f *framework.Framework) vpa_clientset.Interface { // InstallVPA installs a VPA object in the test cluster. func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) { vpaClientSet := getVpaClientSet(f) - _, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalers(f.Namespace.Name).Create(vpa) + _, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalers(f.Namespace.Name).Create(context.TODO(), vpa, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA") } // InstallRawVPA installs a VPA object passed in as raw json in the test cluster. func InstallRawVPA(f *framework.Framework, obj interface{}) error { vpaClientSet := getVpaClientSet(f) - err := vpaClientSet.AutoscalingV1beta2().RESTClient().Post(). + err := vpaClientSet.AutoscalingV1().RESTClient().Post(). Namespace(f.Namespace.Name). Resource("verticalpodautoscalers"). Body(obj). - Do() + Do(context.TODO()) return err.Error() } @@ -384,7 +385,7 @@ func PatchVpaRecommendation(f *framework.Framework, vpa *vpa_types.VerticalPodAu Value: *newStatus, }}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = getVpaClientSet(f).AutoscalingV1beta2().VerticalPodAutoscalers(f.Namespace.Name).Patch(vpa.Name, types.JSONPatchType, bytes) + _, err = getVpaClientSet(f).AutoscalingV1().VerticalPodAutoscalers(f.Namespace.Name).Patch(context.TODO(), vpa.Name, types.JSONPatchType, bytes, metav1.PatchOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch VPA.") } @@ -395,7 +396,7 @@ func AnnotatePod(f *framework.Framework, podName, annotationName, annotationValu Path: fmt.Sprintf("/metadata/annotations/%v", annotationName), Value: annotationValue, }}) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Patch(podName, types.JSONPatchType, bytes) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Patch(context.TODO(), podName, types.JSONPatchType, bytes, metav1.PatchOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch pod.") gomega.Expect(pod.Annotations[annotationName]).To(gomega.Equal(annotationValue)) } @@ -507,7 +508,7 @@ func WaitForVPAMatch(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutosc var polledVpa *vpa_types.VerticalPodAutoscaler err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - polledVpa, err = c.AutoscalingV1beta2().VerticalPodAutoscalers(vpa.Namespace).Get(vpa.Name, metav1.GetOptions{}) + polledVpa, err = c.AutoscalingV1beta2().VerticalPodAutoscalers(vpa.Namespace).Get(context.TODO(), vpa.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -583,7 +584,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC } lr.Spec.Limits = append(lr.Spec.Limits, lrItem) } - _, err := 
f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr) + _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(context.TODO(), lr, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range") } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/e2e.go b/vertical-pod-autoscaler/e2e/v1beta2/e2e.go index d7cdbda281ed..744861449509 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/e2e.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/e2e.go @@ -19,45 +19,66 @@ package autoscaling // This file is a cut down fork of k8s/test/e2e/e2e.go import ( + "context" + "fmt" + "io/ioutil" "os" "path" "testing" + "time" + + "k8s.io/klog" "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/reporters" "github.com/onsi/gomega" - "k8s.io/klog" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeutils "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/component-base/logs" + "k8s.io/component-base/version" "k8s.io/kubernetes/test/e2e/framework" - e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/manifest" + e2ereporters "k8s.io/kubernetes/test/e2e/reporters" + testutils "k8s.io/kubernetes/test/utils" + utilnet "k8s.io/utils/net" + clientset "k8s.io/client-go/kubernetes" // ensure auth plugins are loaded _ "k8s.io/client-go/plugin/pkg/client/auth" // ensure that cloud providers are loaded - _ "k8s.io/kubernetes/test/e2e/framework/providers/aws" - _ "k8s.io/kubernetes/test/e2e/framework/providers/azure" + _ "k8s.io/kubernetes/test/e2e/framework/providers/gce" - _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark" - _ "k8s.io/kubernetes/test/e2e/framework/providers/openstack" - _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere" +) + +const ( + // namespaceCleanupTimeout is how long to wait for the namespace to be deleted. + // If there are any orphaned namespaces to clean up, this test is running + // on a long lived cluster. A long wait here is preferably to spurious test + // failures caused by leaked resources from a previous test run. + namespaceCleanupTimeout = 15 * time.Minute ) var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { - framework.SetupSuite() + setupSuite() return nil }, func(data []byte) { // Run on all Ginkgo nodes - framework.SetupSuitePerGinkgoNode() + setupSuitePerGinkgoNode() }) var _ = ginkgo.SynchronizedAfterSuite(func() { - framework.CleanupSuite() + CleanupSuite() }, func() { - framework.AfterSuiteActions() + AfterSuiteActions() }) // RunE2ETests checks configuration parameters (specified through flags) and then runs @@ -70,7 +91,7 @@ func RunE2ETests(t *testing.T) { logs.InitLogs() defer logs.FlushLogs() - gomega.RegisterFailHandler(e2elog.Fail) + gomega.RegisterFailHandler(framework.Fail) // Disable skipped tests unless they are explicitly requested. 
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" { config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]` @@ -87,7 +108,272 @@ func RunE2ETests(t *testing.T) { r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, "junit_01.xml"))) } } - klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode) + // Stream the progress to stdout and optionally a URL accepting progress updates. + r = append(r, e2ereporters.NewProgressReporter(framework.TestContext.ProgressReportURL)) + + // The DetailsRepoerter will output details about every test (name, files, lines, etc) which helps + // when documenting our tests. + if len(framework.TestContext.SpecSummaryOutput) > 0 { + r = append(r, e2ereporters.NewDetailsReporterFile(framework.TestContext.SpecSummaryOutput)) + } + + klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode) ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r) } + +// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it +// to flip to Ready, log its output and delete it. +func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { + path := "test/images/clusterapi-tester/pod.yaml" + framework.Logf("Parsing pod from %v", path) + p, err := manifest.PodFromManifest(path) + if err != nil { + framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err) + return + } + p.Namespace = ns + if _, err := c.CoreV1().Pods(ns).Create(context.TODO(), p, metav1.CreateOptions{}); err != nil { + framework.Logf("Failed to create %v: %v", p.Name, err) + return + } + defer func() { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}); err != nil { + framework.Logf("Failed to delete pod %v: %v", p.Name, err) + } + }() + timeout := 5 * time.Minute + if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil { + framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) + return + } + logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) + if err != nil { + framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err) + } else { + framework.Logf("Output of clusterapi-tester:\n%v", logs) + } +} + +// getDefaultClusterIPFamily obtains the default IP family of the cluster +// using the Cluster IP address of the kubernetes service created in the default namespace +// This unequivocally identifies the default IP family because services are single family +// TODO: dual-stack may support multiple families per service +// but we can detect if a cluster is dual stack because pods have two addresses (one per family) +func getDefaultClusterIPFamily(c clientset.Interface) string { + // Get the ClusterIP of the kubernetes service created in the default namespace + svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + if err != nil { + framework.Failf("Failed to get kubernetes service ClusterIP: %v", err) + } + + if utilnet.IsIPv6String(svc.Spec.ClusterIP) { + return "ipv6" + } + return "ipv4" +} + +// waitForDaemonSets for all daemonsets in the given namespace to be ready +// (defined as all but 'allowedNotReadyNodes' pods associated with that +// daemonset are ready). 
+func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error { + start := time.Now() + framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", + timeout, ns) + + return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { + dsList, err := c.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + var notReadyDaemonSets []string + for _, ds := range dsList.Items { + framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds())) + if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes { + notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name) + } + } + + if len(notReadyDaemonSets) > 0 { + framework.Logf("there are not ready daemonsets: %v", notReadyDaemonSets) + return false, nil + } + + return true, nil + }) +} + +// setupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step. +// There are certain operations we only want to run once per overall test invocation +// (such as deleting old namespaces, or verifying that all system pods are running. +// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite +// to ensure that these operations only run on the first parallel Ginkgo node. +// +// This function takes two parameters: one function which runs on only the first Ginkgo node, +// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes, +// accepting the byte array. +func setupSuite() { + // Run only on Ginkgo node 1 + + switch framework.TestContext.Provider { + case "gce", "gke": + framework.LogClusterImageSources() + } + + c, err := framework.LoadClientset() + if err != nil { + klog.Fatal("Error loading client: ", err) + } + + // Delete any namespaces except those created by the system. This ensures no + // lingering resources are left over from a previous test run. + if framework.TestContext.CleanStart { + deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */ + []string{ + metav1.NamespaceSystem, + metav1.NamespaceDefault, + metav1.NamespacePublic, + v1.NamespaceNodeLease, + }) + if err != nil { + framework.Failf("Error deleting orphaned namespaces: %v", err) + } + klog.Infof("Waiting for deletion of the following namespaces: %v", deleted) + if err := framework.WaitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil { + framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) + } + } + + // In large clusters we may get to this point but still have a bunch + // of nodes without Routes created. Since this would make a node + // unschedulable, we need to wait until all of them are schedulable. 
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + + // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted + if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes { + nodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) + framework.TestContext.CloudConfig.NumNodes = len(nodes.Items) + } + + // Ensure all pods are running and ready before starting tests (otherwise, + // cluster infrastructure pods that are being pulled or started can block + // test pods from running, and tests that ensure all pods are running and + // ready will fail). + podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout + // TODO: In large clusters, we often observe a non-starting pods due to + // #41007. To avoid those pods preventing the whole test runs (and just + // wasting the whole run), we allow for some not-ready pods (with the + // number equal to the number of allowed not-ready nodes). + if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { + framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) + e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf) + runKubernetesServiceTestContainer(c, metav1.NamespaceDefault) + framework.Failf("Error waiting for all pods to be running and ready: %v", err) + } + + if err := waitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil { + framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err) + } + + // Log the version of the server and this client. + framework.Logf("e2e test version: %s", version.Get().GitVersion) + + dc := c.DiscoveryClient + + serverVersion, serverErr := dc.ServerVersion() + if serverErr != nil { + framework.Logf("Unexpected server error retrieving version: %v", serverErr) + } + if serverVersion != nil { + framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion) + } + + if framework.TestContext.NodeKiller.Enabled { + nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider) + go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh) + } +} + +// setupSuitePerGinkgoNode is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step. 
+// There are certain operations we only want to run once per overall test invocation on each Ginkgo node +// such as making some global variables accessible to all parallel executions +// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite +// Ref: https://onsi.github.io/ginkgo/#parallel-specs +func setupSuitePerGinkgoNode() { + // Obtain the default IP family of the cluster + // Some e2e test are designed to work on IPv4 only, this global variable + // allows to adapt those tests to work on both IPv4 and IPv6 + // TODO: dual-stack + // the dual stack clusters can be ipv4-ipv6 or ipv6-ipv4, order matters, + // and services use the primary IP family by default + c, err := framework.LoadClientset() + if err != nil { + klog.Fatal("Error loading client: ", err) + } + framework.TestContext.IPFamily = getDefaultClusterIPFamily(c) + framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily) +} + +// CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step. +// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs). +// Here, the order of functions is reversed; first, the function which runs everywhere, +// and then the function that only runs on the first Ginkgo node. +func CleanupSuite() { + // Run on all Ginkgo nodes + framework.Logf("Running AfterSuite actions on all nodes") + framework.RunCleanupActions() +} + +// AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite +func AfterSuiteActions() { + // Run only Ginkgo on node 1 + framework.Logf("Running AfterSuite actions on node 1") + if framework.TestContext.ReportDir != "" { + framework.CoreDump(framework.TestContext.ReportDir) + } + if framework.TestContext.GatherSuiteMetricsAfterTest { + if err := gatherTestSuiteMetrics(); err != nil { + framework.Logf("Error gathering metrics: %v", err) + } + } + if framework.TestContext.NodeKiller.Enabled { + close(framework.TestContext.NodeKiller.NodeKillerStopCh) + } +} + +func gatherTestSuiteMetrics() error { + framework.Logf("Gathering metrics") + c, err := framework.LoadClientset() + if err != nil { + return fmt.Errorf("error loading client: %v", err) + } + + // Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally). 
+ grabber, err := e2emetrics.NewMetricsGrabber(c, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics) + if err != nil { + return fmt.Errorf("failed to create MetricsGrabber: %v", err) + } + + received, err := grabber.Grab() + if err != nil { + return fmt.Errorf("failed to grab metrics: %v", err) + } + + metricsForE2E := (*e2emetrics.ComponentCollection)(&received) + metricsJSON := metricsForE2E.PrintJSON() + if framework.TestContext.ReportDir != "" { + filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json") + if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil { + return fmt.Errorf("error writing to %q: %v", filePath, err) + } + } else { + framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON) + } + + return nil +} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go b/vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go index e71b9098ce14..084887e93406 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go @@ -33,7 +33,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" "k8s.io/kubernetes/test/e2e/framework/testfiles" - "k8s.io/kubernetes/test/e2e/framework/viperconfig" "k8s.io/kubernetes/test/e2e/generated" "k8s.io/kubernetes/test/utils/image" ) @@ -55,7 +54,7 @@ func TestMain(m *testing.M) { // Now that we know which Viper config (if any) was chosen, // parse it and update those options which weren't already set via command line flags // (which have higher priority). - if err := viperconfig.ViperizeFlags(*viperConfig, "e2e", flag.CommandLine); err != nil { + if err := viperizeFlags(*viperConfig, "e2e", flag.CommandLine); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go b/vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go index 40faa18d380d..41ceea13e69b 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "fmt" "time" @@ -85,7 +86,7 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() { vpaClientSet = vpa_clientset.NewForConfigOrDie(config) vpaClient := vpaClientSet.AutoscalingV1beta2() - _, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD) + _, err = vpaClient.VerticalPodAutoscalers(ns).Create(context.TODO(), vpaCRD, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -152,7 +153,7 @@ var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() { vpaClientSet = vpa_clientset.NewForConfigOrDie(config) vpaClient := vpaClientSet.AutoscalingV1beta2() - _, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD) + _, err = vpaClient.VerticalPodAutoscalers(ns).Create(context.TODO(), vpaCRD, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -174,7 +175,7 @@ func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions ns := f.Namespace.Name c := f.ClientSet - podList, err := c.CoreV1().Pods(ns).List(listOptions) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), listOptions) if err != nil { return false, err } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/recommender.go b/vertical-pod-autoscaler/e2e/v1beta2/recommender.go index 367e721e86c7..104da223b4b5 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/recommender.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/recommender.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "fmt" "strings" "time" @@ -133,12 +134,12 @@ var _ = RecommenderE2eDescribe("Checkpoints", func() { }, } - _, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalerCheckpoints(ns).Create(&checkpoint) + _, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalerCheckpoints(ns).Create(context.TODO(), &checkpoint, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) time.Sleep(15 * time.Minute) - list, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalerCheckpoints(ns).List(metav1.ListOptions{}) + list, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalerCheckpoints(ns).List(context.TODO(), metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(list.Items).To(gomega.BeEmpty()) }) @@ -379,7 +380,7 @@ func createVpaCRDWithContainerScalingModes(f *framework.Framework, modes ...vpa_ func deleteRecommender(c clientset.Interface) error { namespace := "kube-system" listOptions := metav1.ListOptions{} - podList, err := c.CoreV1().Pods(namespace).List(listOptions) + podList, err := c.CoreV1().Pods(namespace).List(context.TODO(), listOptions) if err != nil { fmt.Println("Could not list pods.", err) return err @@ -388,7 +389,7 @@ func deleteRecommender(c clientset.Interface) error { for _, pod := range podList.Items { if strings.HasPrefix(pod.Name, "vpa-recommender") { fmt.Print("Deleting pod.", namespace, pod.Name) - err := c.CoreV1().Pods(namespace).Delete(pod.Name, &metav1.DeleteOptions{}) + err := c.CoreV1().Pods(namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) if err != nil { return err } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/updater.go b/vertical-pod-autoscaler/e2e/v1beta2/updater.go index 3d4d0ddfc455..acbc003c70a6 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/updater.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/updater.go @@ -17,6 +17,7 @@ limitations under the License. 
 package autoscaling
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -52,7 +53,7 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 			ginkgo.By("Deleting the Admission Controller status")
 			close(stopCh)
 			err := f.ClientSet.CoordinationV1().Leases(status.AdmissionControllerStatusNamespace).
-				Delete(status.AdmissionControllerStatusName, &metav1.DeleteOptions{})
+				Delete(context.TODO(), status.AdmissionControllerStatusName, metav1.DeleteOptions{})
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		}()
 		statusUpdater.Run(stopCh)
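Beyond the context/options change, the dependency bump also relocates several helpers out of the core e2e framework package: framework.RcByNameContainer becomes framework_rc.ByNameContainer (dropping its image argument), framework.RunRC becomes e2erc.RunRC, framework.LogFailedContainers becomes e2ekubectl.LogFailedContainers, and the retired resource-consumer controller image is replaced by agnhost's resource-consumer-controller subcommand. The fragment below is a minimal, hypothetical usage sketch of the relocated constructor under those assumptions, not code from this patch; runExampleRC and its arguments are placeholders.

package autoscaling

import (
	apiv1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	framework_rc "k8s.io/kubernetes/test/e2e/framework/rc"
	testutils "k8s.io/kubernetes/test/utils"
)

// runExampleRC illustrates the relocated helper: ByNameContainer now lives in
// k8s.io/kubernetes/test/e2e/framework/rc and builds the ReplicationController
// from the container spec instead of a separate image argument.
func runExampleRC(c clientset.Interface, ns string, replicas int32, labels map[string]string, container apiv1.Container) error {
	rc := framework_rc.ByNameContainer("example-rc", replicas, labels, container, nil)
	rc.Namespace = ns
	return testutils.CreateRCWithRetries(c, ns, rc)
}

The v1beta2 actuation.go hunks above follow the same shape for the hamster workloads, and add a local newReplicaSet helper where the framework's NewReplicaSet signature no longer fits.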