diff --git a/.github/workflows/prbuild.yaml b/.github/workflows/prbuild.yaml index e447d215efa..3c55f9ef346 100644 --- a/.github/workflows/prbuild.yaml +++ b/.github/workflows/prbuild.yaml @@ -21,7 +21,7 @@ jobs: - name: test run: | go mod vendor - make local-cluster install deploy test + make local-cluster test test-e2e codespell: name: Codespell runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 1508ce0e897..832db2abd02 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,9 @@ VERSION ?= $(GIT_REF) OLD_VERSION ?= main NEW_VERSION ?= $(OLD_VERSION) +# Used as a go test argument for running e2e tests. +TEST ?= .* + # Image URL to use all building/pushing image targets IMAGE ?= docker.io/projectcontour/contour-operator @@ -190,3 +193,8 @@ release: ## Prepares a tagged release of the operator. .PHONY: release release: ./hack/release/make-release-tag.sh $(OLD_VERSION) $(NEW_VERSION) + +test-e2e: ## Runs e2e tests. +.PHONY: test-e2e +test-e2e: deploy + go test -timeout 20m -count 1 -v -tags e2e -run "$(TEST)" ./test/e2e diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 34f30e45bc2..b30333f406e 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -40,9 +40,9 @@ make manager This produces a `contour-operator` binary in your `$GOPATH/bin` directory and runs go fmt and go vet against the code. -### Running the unit tests +### Running the tests -You can run all the unit tests for the project: +To run all the unit tests for the project: ``` make check @@ -54,7 +54,20 @@ To run the tests for a single package, change to package directory and run: go test . ``` -__Note:__ Unit tests must pass for your PR to get merged. +The e2e tests require a Kubernetes cluster and use your current cluster context. +To create a [kind](https://kind.sigs.k8s.io/) Kubernetes cluster: + +``` +make local-cluster +``` + +To run the e2e tests for the project: + +``` +make test-e2e +``` + +__Note:__ Unit and e2e tests must pass for your PR to get merged. 
## Contribution workflow @@ -169,8 +182,7 @@ Operator CRDs and run the operator in the foreground: make run ``` -Before submitting your changes, follow the image-based deployment instructions to ensure the operator works as -expected within a Kubernetes cluster. +Before submitting your changes, run the [required tests](#running-the-tests). ## DCO Sign off diff --git a/test/e2e/operator_test.go b/test/e2e/operator_test.go new file mode 100644 index 00000000000..1dac0ea985d --- /dev/null +++ b/test/e2e/operator_test.go @@ -0,0 +1,123 @@ +// Copyright Project Contour Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build e2e + +package e2e + +import ( + "context" + "os" + "testing" + "time" + + operatorv1alpha1 "github.com/projectcontour/contour-operator/api/v1alpha1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + // kclient is the Kubernetes client used for e2e tests. + kclient client.Client + // ctx is an empty context used for client calls. + ctx = context.TODO() + // operatorName is the name of the operator. + operatorName = "contour-operator" + // operatorNs is the name of the operator's namespace. + operatorNs = "contour-operator" + // defaultContourNs is the default spec.namespace.name of a Contour. + defaultContourNs = "projectcontour" + // testUrl is the url used to test e2e functionality. 
+ testUrl = "http://local.projectcontour.io/" + // expectedDeploymentConditions are the expected status conditions of a + // deployment. + expectedDeploymentConditions = []appsv1.DeploymentCondition{ + {Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue}, + } +) + +func TestMain(m *testing.M) { + cl, err := newClient() + if err != nil { + os.Exit(1) + } + kclient = cl + + os.Exit(m.Run()) +} + +func TestOperatorDeploymentAvailable(t *testing.T) { + if err := waitForDeploymentStatusConditions(ctx, kclient, 3*time.Minute, operatorName, operatorNs, expectedDeploymentConditions...); err != nil { + t.Fatalf("failed to observe expected conditions for deployment %s/%s: %v", operatorNs, operatorName, err) + } + t.Logf("observed expected status conditions for deployment %s/%s", operatorNs, operatorName) +} + +func TestDefaultContour(t *testing.T) { + testName := "test-default-contour" + if err := newContour(ctx, kclient, testName, operatorNs); err != nil { + t.Fatalf("failed to create contour %s/%s: %v", operatorNs, testName, err) + } + t.Logf("created contour %s/%s", operatorNs, testName) + + expectedContourConditions := []metav1.Condition{ + {Type: operatorv1alpha1.ContourAvailableConditionType, Status: metav1.ConditionTrue}, + // TODO [danehans]: Update when additional status conditions are added to Contour. + } + if err := waitForContourStatusConditions(ctx, kclient, 5*time.Minute, testName, operatorNs, expectedContourConditions...); err != nil { + t.Fatalf("failed to observe expected status conditions for contour %s/%s: %v", operatorNs, testName, err) + } + t.Logf("observed expected status conditions for contour %s/%s", operatorNs, testName) + + // Create a sample workload for e2e testing. 
+ appName := "kuard" + if err := newDeployment(ctx, kclient, appName, defaultContourNs, "gcr.io/kuar-demo/kuard-amd64:1", 3); err != nil { + t.Fatalf("failed to create deployment %s/%s: %v", defaultContourNs, appName, err) + } + t.Logf("created deployment %s/%s", defaultContourNs, appName) + + if err := waitForDeploymentStatusConditions(ctx, kclient, 3*time.Minute, appName, defaultContourNs, expectedDeploymentConditions...); err != nil { + t.Fatalf("failed to observe expected status conditions for deployment %s/%s: %v", defaultContourNs, appName, err) + } + t.Logf("observed expected status conditions for deployment %s/%s", defaultContourNs, appName) + + if err := newClusterIPService(ctx, kclient, appName, defaultContourNs, 80, 8080); err != nil { + t.Fatalf("failed to create service %s/%s: %v", defaultContourNs, appName, err) + } + t.Logf("created service %s/%s", defaultContourNs, appName) + + if err := newIngress(ctx, kclient, appName, defaultContourNs, appName, 80); err != nil { + t.Fatalf("failed to create ingress %s/%s: %v", defaultContourNs, appName, err) + } + t.Logf("created ingress %s/%s", defaultContourNs, appName) + + if err := waitForHTTPResponse(testUrl, 1*time.Minute); err != nil { + t.Fatalf("failed to receive http response for %q: %v", testUrl, err) + } + t.Logf("received http response for %q", testUrl) + + // Ensure the default contour can be deleted and clean-up. + if err := deleteContour(ctx, kclient, 3*time.Minute, testName, operatorNs); err != nil { + t.Fatalf("failed to delete contour %s/%s: %v", operatorNs, testName, err) + } + + // Delete the operand namespace since contour.spec.namespace.removeOnDeletion + // defaults to false. 
+ if err := deleteNamespace(ctx, kclient, 5*time.Minute, defaultContourNs); err != nil { + t.Fatalf("failed to delete namespace %s: %v", defaultContourNs, err) + } +} diff --git a/test/e2e/utils.go b/test/e2e/utils.go new file mode 100644 index 00000000000..73f861a4cba --- /dev/null +++ b/test/e2e/utils.go @@ -0,0 +1,233 @@ +// Copyright Project Contour Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build e2e + +package e2e + +import ( + "context" + "fmt" + "net/http" + "reflect" + "time" + + operatorv1alpha1 "github.com/projectcontour/contour-operator/api/v1alpha1" + oputil "github.com/projectcontour/contour-operator/util" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +var ( + scheme = runtime.NewScheme() +) + +func init() { + _ = clientgoscheme.AddToScheme(scheme) + _ = operatorv1alpha1.AddToScheme(scheme) +} + +func newClient() (client.Client, error) { + opts := client.Options{ + Scheme: scheme, + } + kubeClient, err := client.New(config.GetConfigOrDie(), opts) + if err != nil { + return nil, fmt.Errorf("failed to create kube client: %v", err) + } + return 
kubeClient, nil +} + +func newContour(ctx context.Context, cl client.Client, name, ns string) error { + cntr := oputil.NewContour(name, ns) + if err := cl.Create(ctx, cntr); err != nil { + return fmt.Errorf("failed to create contour %s/%s: %v", cntr.Namespace, cntr.Name, err) + } + return nil +} + +func deleteContour(ctx context.Context, cl client.Client, timeout time.Duration, name, ns string) error { + cntr := oputil.NewContour(name, ns) + if err := cl.Delete(ctx, cntr); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete contour %s/%s: %v", cntr.Namespace, cntr.Name, err) + } + } + + key := types.NamespacedName{ + Name: cntr.Name, + Namespace: cntr.Namespace, + } + + err := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { + if err := cl.Get(ctx, key, cntr); err != nil { + if errors.IsNotFound(err) { + return true, nil + } + return false, nil + } + return false, nil + }) + if err != nil { + return fmt.Errorf("timed out waiting for contour %s/%s to be deleted: %v", cntr.Namespace, cntr.Name, err) + } + return nil +} + +func newDeployment(ctx context.Context, cl client.Client, name, ns, image string, replicas int) error { + deploy := oputil.NewDeployment(name, ns, image, replicas) + if err := cl.Create(ctx, deploy); err != nil { + return fmt.Errorf("failed to create deployment %s/%s: %v", deploy.Namespace, deploy.Name, err) + } + return nil +} + +func newClusterIPService(ctx context.Context, cl client.Client, name, ns string, port, targetPort int) error { + svc := oputil.NewClusterIPService(ns, name, port, targetPort) + if err := cl.Create(ctx, svc); err != nil { + return fmt.Errorf("failed to create service %s/%s: %v", svc.Namespace, svc.Name, err) + } + return nil +} + +func newIngress(ctx context.Context, cl client.Client, name, ns, backendName string, backendPort int) error { + ing := oputil.NewIngress(name, ns, backendName, backendPort) + if err := cl.Create(ctx, ing); err != nil { + return fmt.Errorf("failed to 
create ingress %s/%s: %v", ing.Namespace, ing.Name, err) + } + return nil +} + +func waitForContourStatusConditions(ctx context.Context, cl client.Client, timeout time.Duration, name, ns string, conditions ...metav1.Condition) error { + nsName := types.NamespacedName{ + Namespace: ns, + Name: name, + } + return wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { + cntr := &operatorv1alpha1.Contour{} + if err := cl.Get(ctx, nsName, cntr); err != nil { + return false, nil + } + expected := contourConditionMap(conditions...) + current := contourConditionMap(cntr.Status.Conditions...) + return contourConditionsMatchExpected(expected, current), nil + }) +} + +func waitForDeploymentStatusConditions(ctx context.Context, cl client.Client, timeout time.Duration, name, ns string, conditions ...appsv1.DeploymentCondition) error { + nsName := types.NamespacedName{ + Namespace: ns, + Name: name, + } + return wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { + deploy := &appsv1.Deployment{} + if err := cl.Get(ctx, nsName, deploy); err != nil { + return false, nil + } + expected := deploymentConditionMap(conditions...) + current := deploymentConditionMap(deploy.Status.Conditions...) 
+ return deploymentConditionsMatchExpected(expected, current), nil + }) +} + +func contourConditionMap(conditions ...metav1.Condition) map[string]string { + conds := map[string]string{} + for _, cond := range conditions { + conds[cond.Type] = string(cond.Status) + } + return conds +} + +func deploymentConditionMap(conditions ...appsv1.DeploymentCondition) map[appsv1.DeploymentConditionType]corev1.ConditionStatus { + conds := map[appsv1.DeploymentConditionType]corev1.ConditionStatus{} + for _, cond := range conditions { + conds[cond.Type] = cond.Status + } + return conds +} + +func contourConditionsMatchExpected(expected, actual map[string]string) bool { + filtered := map[string]string{} + for k := range actual { + if _, comparable := expected[k]; comparable { + filtered[k] = actual[k] + } + } + return reflect.DeepEqual(expected, filtered) +} + +func deploymentConditionsMatchExpected(expected, actual map[appsv1.DeploymentConditionType]corev1.ConditionStatus) bool { + filtered := map[appsv1.DeploymentConditionType]corev1.ConditionStatus{} + for k := range actual { + if _, comparable := expected[k]; comparable { + filtered[k] = actual[k] + } + } + return reflect.DeepEqual(expected, filtered) +} + +func waitForHTTPResponse(url string, timeout time.Duration) error { + method := "GET" + err := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { + req, _ := http.NewRequest(method, url, nil) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, nil + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("%s %q failed: %v", method, url, err) + } + return nil +} + +func deleteNamespace(ctx context.Context, cl client.Client, timeout time.Duration, name string) error { + ns := oputil.NewNamespace(name) + if err := cl.Delete(ctx, ns); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete namespace 
%s: %v", ns.Name, err) + } + } + + key := types.NamespacedName{ + Name: ns.Name, + } + + err := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { + if err := cl.Get(ctx, key, ns); err != nil { + if errors.IsNotFound(err) { + return true, nil + } + return false, nil + } + return false, nil + }) + if err != nil { + return fmt.Errorf("timed out waiting for namespace %s to be deleted: %v", ns.Name, err) + } + return nil +} diff --git a/util/object.go b/util/object.go index 798f3121024..42dc95c97bf 100644 --- a/util/object.go +++ b/util/object.go @@ -14,9 +14,14 @@ package util import ( + operatorv1alpha1 "github.com/projectcontour/contour-operator/api/v1alpha1" + + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) // NewClusterRole makes a ClusterRole object using the provided name @@ -99,3 +104,94 @@ func NewUnprivilegedPodSecurity() *corev1.PodSecurityContext { RunAsNonRoot: &nonRoot, } } + +// NewNamespace makes a Namespace object using the provided name +// for the object's name. +func NewNamespace(name string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } +} + +// NewContour makes a Contour object using the provided ns/name +// for the object's namespace and name. +func NewContour(name, ns string) *operatorv1alpha1.Contour { + return &operatorv1alpha1.Contour{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + } +} + +// NewDeployment makes a Deployment object using the provided parameters. 
+func NewDeployment(name, ns, image string, replicas int) *appsv1.Deployment { + replInt32 := int32(replicas) + container := corev1.Container{ + Name: name, + Image: image, + } + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + Labels: map[string]string{"app": name}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replInt32, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + }, + }, + }, + } +} + +// NewClusterIPService makes a Service object of type ClusterIP +// with a single port/targetPort using the provided parameters. +func NewClusterIPService(ns, name string, port, targetPort int) *corev1.Service { + svcPort := corev1.ServicePort{ + Port: int32(port), + TargetPort: intstr.IntOrString{IntVal: int32(targetPort)}, + } + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + Labels: map[string]string{"app": name}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{svcPort}, + Selector: map[string]string{"app": name}, + Type: corev1.ServiceTypeClusterIP, + }, + } +} + +// NewIngress makes an Ingress using the provided ns/name for the +// object's namespace/name and backendName/backendPort as the name +// and port of the backend Service. +func NewIngress(name, ns, backendName string, backendPort int) *networkingv1beta1.Ingress { + return &networkingv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Labels: map[string]string{"app": name}, + }, + Spec: networkingv1beta1.IngressSpec{ + Backend: &networkingv1beta1.IngressBackend{ + ServiceName: backendName, + ServicePort: intstr.IntOrString{IntVal: int32(backendPort)}, + }, + }, + } +}