diff --git a/go.mod b/go.mod index 6ce2426a..c6a68aa8 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( google.golang.org/api v0.58.0 k8s.io/api v0.27.4 k8s.io/apimachinery v0.27.4 + k8s.io/apiserver v0.27.2 k8s.io/client-go v0.27.4 k8s.io/utils v0.0.0-20230209194617-a36077c30491 sigs.k8s.io/controller-runtime v0.15.1 diff --git a/go.sum b/go.sum index d07a3591..5107ac06 100644 --- a/go.sum +++ b/go.sum @@ -1215,6 +1215,8 @@ k8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs= k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400= +k8s.io/apiserver v0.27.2 h1:p+tjwrcQEZDrEorCZV2/qE8osGTINPuS5ZNqWAvKm5E= +k8s.io/apiserver v0.27.2/go.mod h1:EsOf39d75rMivgvvwjJ3OW/u9n1/BmUMK5otEOJrb1Y= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= k8s.io/client-go v0.27.4 h1:vj2YTtSJ6J4KxaC88P4pMPEQECWMY8gqPqsTgUKzvjk= diff --git a/test/common/common.go b/test/common/common.go new file mode 100644 index 00000000..bc1d6a8e --- /dev/null +++ b/test/common/common.go @@ -0,0 +1,472 @@ +//go:build e2e +// +build e2e + +package common + +import ( + "bufio" + "context" + "fmt" + "os" + "strings" + "testing" + "time" + + operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" + olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + kscheme "k8s.io/client-go/kubernetes/scheme" + + operatorv1alpha1 "github.com/openshift/external-dns-operator/api/v1alpha1" + operatorv1beta1 "github.com/openshift/external-dns-operator/api/v1beta1" + + "sigs.k8s.io/controller-runtime/pkg/client/config" + + "k8s.io/client-go/kubernetes" + + "github.com/miekg/dns" + "sigs.k8s.io/controller-runtime/pkg/client" + + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/utils/pointer" +) + +const ( + googleDNSServer = "8.8.8.8" + e2eSeparateOperandNsEnvVar = "E2E_SEPARATE_OPERAND_NAMESPACE" + operandNamespace = "external-dns" + rbacRsrcName = "external-dns-operator" + operatorServiceAccount = "external-dns-operator" + OperatorNamespace = "external-dns-operator" + dnsProviderEnvVar = "DNS_PROVIDER" + e2eSkipDNSProvidersEnvVar = "E2E_SKIP_DNS_PROVIDERS" + DnsPollingInterval = 15 * time.Second + DnsPollingTimeout = 3 * time.Minute +) + +var ( + KubeClient client.Client + KubeClientSet *kubernetes.Clientset + scheme *runtime.Scheme +) + +func init() { + initScheme() + + var err error + if err = initKubeClient(); err != nil { + fmt.Printf("Failed to initialize kube client: %v\n", err) + os.Exit(1) + } +} + +func DefaultService(name, namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + "external-dns.mydomain.org/publish": "yes", + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "name": "hello-openshift", + }, + Type: corev1.ServiceTypeLoadBalancer, + Ports: []corev1.ServicePort{ + { + 
Protocol: corev1.ProtocolTCP, + Port: 80, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8080, + }, + }, + }, + }, + } +} + +func MustGetEnv(name string) string { + val := os.Getenv(name) + if val == "" { + panic(fmt.Sprintf("environment variable %s must be set", name)) + } + return val +} + +func RootCredentials(name string) (map[string][]byte, error) { + secret := &corev1.Secret{} + secretName := types.NamespacedName{ + Name: name, + Namespace: "kube-system", + } + if err := KubeClient.Get(context.TODO(), secretName, secret); err != nil { + return nil, fmt.Errorf("failed to get credentials secret %s: %w", secretName.Name, err) + } + return secret.Data, nil +} + +func LookupARecord(host, server string) ([]string, error) { + dnsClient := &dns.Client{} + message := &dns.Msg{} + message.SetQuestion(dns.Fqdn(host), dns.TypeA) + response, _, err := dnsClient.Exchange(message, fmt.Sprintf("%s:53", server)) + if err != nil { + return nil, err + } + if len(response.Answer) == 0 { + return nil, fmt.Errorf("not found") + } + var ips []string + for _, ans := range response.Answer { + if aRec, ok := ans.(*dns.A); ok { + ips = append(ips, aRec.A.String()) + } + } + if len(ips) == 0 { + return nil, fmt.Errorf("not found") + } + return ips, nil +} + +// LookupARecordInternal queries for a DNS hostname inside the VPC of a cluster using a dig pod. It returns a +// map structure of the resolved IPs for easy lookup. +func LookupARecordInternal(ctx context.Context, t *testing.T, namespace, host string) (map[string]struct{}, error) { + t.Helper() + + // Create dig pod for querying DNS inside the cluster with random name + // to prevent naming collisions in case one is still being deleted. + digPodName := names.SimpleNameGenerator.GenerateName("digpod-") + clientPod := buildDigPod(digPodName, namespace, host) + if err := KubeClient.Create(ctx, clientPod); err != nil { + return nil, fmt.Errorf("failed to create pod %s/%s: %v", clientPod.Namespace, clientPod.Name, err) + } + defer func() { + _ = KubeClient.Delete(ctx, clientPod) + }() + + // Loop until dig pod starts, then parse logs for query results. + var responseCode string + var gotIPs map[string]struct{} + err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) { + if err := KubeClient.Get(ctx, types.NamespacedName{Name: clientPod.Name, Namespace: clientPod.Namespace}, clientPod); err != nil { + t.Logf("Failed to get pod %s/%s: %v", clientPod.Namespace, clientPod.Name, err) + return false, nil + } + switch clientPod.Status.Phase { + case corev1.PodRunning: + t.Log("Waiting for dig pod to finish") + return false, nil + case corev1.PodPending: + t.Log("Waiting for dig pod to start") + return false, nil + case corev1.PodFailed, corev1.PodSucceeded: + // Failed or Succeeded, let's continue on to check the logs. + break + default: + return true, fmt.Errorf("unhandled pod status type") + } + + // Get logs of the dig pod. 
+ readCloser, err := KubeClientSet.CoreV1().Pods(clientPod.Namespace).GetLogs(clientPod.Name, &corev1.PodLogOptions{ + Container: clientPod.Spec.Containers[0].Name, + Follow: false, + }).Stream(ctx) + if err != nil { + t.Logf("Failed to read output from pod %s: %v (retrying)", clientPod.Name, err) + return false, nil + } + scanner := bufio.NewScanner(readCloser) + defer func() { + if err := readCloser.Close(); err != nil { + t.Fatalf("Failed to close reader for pod %s: %v", clientPod.Name, err) + } + }() + + gotIPs = make(map[string]struct{}) + for scanner.Scan() { + line := scanner.Text() + + // Skip blank lines. + if strings.TrimSpace(line) == "" { + continue + } + // Parse status out (helpful for future debugging) + if strings.HasPrefix(line, ";;") && strings.Contains(line, "status:") { + responseCodeSection := strings.TrimSpace(strings.Split(line, ",")[1]) + responseCode = strings.Split(responseCodeSection, " ")[1] + t.Logf("DNS Response Code: %s", responseCode) + } + // If it doesn't begin with ";", then we have an answer. + if !strings.HasPrefix(line, ";") { + splitAnswer := strings.Fields(line) + if len(splitAnswer) < 5 { + t.Logf("Expected dig answer to have at least 5 fields: %q", line) + return true, nil + } + gotIP := strings.Fields(line)[4] + gotIPs[gotIP] = struct{}{} + } + } + t.Logf("Got IPs: %v", gotIPs) + return true, nil + }) + if err != nil { + return nil, fmt.Errorf("failed to observe the expected dig results: %v", err) + } + + return gotIPs, nil +} + +// buildDigPod returns a pod definition for a pod with the given name +// in the given namespace that digs the specified address. +func buildDigPod(name, namespace, address string, extraArgs ...string) *corev1.Pod { + digArgs := []string{ + address, + "A", + "+noall", + "+answer", + "+comments", + } + digArgs = append(digArgs, extraArgs...) + digArgs = append(digArgs, address) + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "dig", + Image: "openshift/origin-node", + Command: []string{"/bin/dig"}, + Args: digArgs, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + Privileged: pointer.Bool(false), + RunAsNonRoot: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + } +} + +// GetServiceIPs retrieves the provided service's IP or hostname and resolves the hostname to IPs (if applicable). +// Return values are the serviceAddress (LoadBalancer IP or Hostname), the service resolved IPs, and an error +// (if applicable).
+func GetServiceIPs(ctx context.Context, t *testing.T, timeout time.Duration, svcName types.NamespacedName) (string, map[string]struct{}, error) { + t.Helper() + + // Get the IPs of the loadbalancer which is created for the service + var serviceAddress string + serviceResolvedIPs := make(map[string]struct{}) + if err := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (done bool, err error) { + t.Log("Getting IPs of service's load balancer") + var service corev1.Service + err = KubeClient.Get(ctx, svcName, &service) + if err != nil { + return false, err + } + + // If there is no associated loadbalancer then retry later + if len(service.Status.LoadBalancer.Ingress) < 1 { + return false, nil + } + + // Get the IPs of the loadbalancer + if service.Status.LoadBalancer.Ingress[0].IP != "" { + serviceAddress = service.Status.LoadBalancer.Ingress[0].IP + serviceResolvedIPs[service.Status.LoadBalancer.Ingress[0].IP] = struct{}{} + } else if service.Status.LoadBalancer.Ingress[0].Hostname != "" { + lbHostname := service.Status.LoadBalancer.Ingress[0].Hostname + serviceAddress = lbHostname + ips, err := LookupARecord(lbHostname, googleDNSServer) + if err != nil { + t.Logf("Waiting for IP of loadbalancer %s", lbHostname) + // If the hostname cannot be resolved currently then retry later + return false, nil + } + for _, ip := range ips { + serviceResolvedIPs[ip] = struct{}{} + } + } else { + t.Logf("Waiting for loadbalancer details for service %s", svcName.Name) + return false, nil + } + t.Logf("Loadbalancer's IP(s): %v", serviceResolvedIPs) + return true, nil + }); err != nil { + return "", nil, fmt.Errorf("failed to get loadbalancer IPs for service %s/%s: %v", svcName.Name, svcName.Namespace, err) + } + + return serviceAddress, serviceResolvedIPs, nil +} + +func EnsureOperandResources(ctx context.Context) error { + if os.Getenv(e2eSeparateOperandNsEnvVar) != "true" { + return nil + } + + if err := ensureOperandNamespace(ctx); err != nil { + return fmt.Errorf("failed to create %s namespace: %v", operandNamespace, err) + } + + if err := ensureOperandRole(ctx); err != nil { + return fmt.Errorf("failed to create role external-dns-operator in ns %s: %v", operandNamespace, err) + } + + if err := ensureOperandRoleBinding(ctx); err != nil { + return fmt.Errorf("failed to create rolebinding external-dns-operator in ns %s: %v", operandNamespace, err) + } + + return nil +} + +func ensureOperandNamespace(ctx context.Context) error { + return KubeClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: operandNamespace}}) +} + +func ensureOperandRole(ctx context.Context) error { + rules := []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets", "serviceaccounts", "configmaps"}, + Verbs: []string{"get", "list", "watch", "create", "update", "delete"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"apps"}, + Resources: []string{"deployments"}, + Verbs: []string{"get", "list", "watch", "create", "update", "delete"}, + }, + } + + role := rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: rbacRsrcName, + Namespace: operandNamespace, + }, + Rules: rules, + } + return KubeClient.Create(ctx, &role) +} + +func ensureOperandRoleBinding(ctx context.Context) error { + rb := rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: rbacRsrcName, + Namespace: operandNamespace, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: 
rbacv1.GroupName, + Kind: "Role", + Name: rbacRsrcName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: operatorServiceAccount, + Namespace: OperatorNamespace, + }, + }, + } + return KubeClient.Create(ctx, &rb) +} + +func initKubeClient() error { + kubeConfig, err := config.GetConfig() + if err != nil { + return fmt.Errorf("failed to get kube config: %v", err) + } + + KubeClient, err = client.New(kubeConfig, client.Options{}) + if err != nil { + return fmt.Errorf("failed to create kube client: %v", err) + } + + KubeClientSet, err = kubernetes.NewForConfig(kubeConfig) + if err != nil { + return fmt.Errorf("failed to create kube clientset: %v", err) + } + + return nil +} + +func IsOpenShift() bool { + return os.Getenv("OPENSHIFT_CI") != "" +} + +func GetPlatformType(isOpenShift bool) (string, error) { + var platformType string + if isOpenShift { + if dnsProvider := os.Getenv(dnsProviderEnvVar); dnsProvider != "" { + platformType = dnsProvider + } else { + var infraConfig configv1.Infrastructure + err := KubeClient.Get(context.Background(), types.NamespacedName{Name: "cluster"}, &infraConfig) + if err != nil { + return "", err + } + return string(infraConfig.Status.PlatformStatus.Type), nil + } + } else { + platformType = MustGetEnv(dnsProviderEnvVar) + } + return platformType, nil +} + +func SkipProvider(platformType string) bool { + if providersToSkip := os.Getenv(e2eSkipDNSProvidersEnvVar); len(providersToSkip) > 0 { + for _, provider := range strings.Split(providersToSkip, ",") { + if strings.EqualFold(provider, platformType) { + return true + } + } + } + return false +} + +func initScheme() { + scheme = kscheme.Scheme + if err := configv1.Install(scheme); err != nil { + panic(err) + } + if err := operatorv1alpha1.AddToScheme(scheme); err != nil { + panic(err) + } + if err := operatorv1beta1.AddToScheme(scheme); err != nil { + panic(err) + } + if err := operatorv1.Install(scheme); err != nil { + panic(err) + } + if err := routev1.Install(scheme); err != nil { + panic(err) + } + if err := olmv1alpha1.AddToScheme(scheme); err != nil { + panic(err) + } +} diff --git a/test/e2e/aws.go b/test/e2e/aws.go index 4dee2e05..3fe817e4 100644 --- a/test/e2e/aws.go +++ b/test/e2e/aws.go @@ -7,6 +7,8 @@ import ( "fmt" "time" + "github.com/openshift/external-dns-operator/test/common" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" @@ -14,7 +16,6 @@ import ( configv1 "github.com/openshift/api/config/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-dns-operator/api/v1alpha1" operatorv1beta1 "github.com/openshift/external-dns-operator/api/v1beta1" @@ -26,9 +27,9 @@ type awsTestHelper struct { secretKey string } -func newAWSHelper(isOpenShiftCI bool, kubeClient client.Client) (providerTestHelper, error) { +func newAWSHelper(isOpenShiftCI bool) (providerTestHelper, error) { provider := &awsTestHelper{} - if err := provider.prepareConfigurations(isOpenShiftCI, kubeClient); err != nil { + if err := provider.prepareConfigurations(isOpenShiftCI); err != nil { return nil, err } @@ -170,17 +171,17 @@ func (a *awsTestHelper) deleteHostedZone(zoneID, zoneDomain string) error { return nil } -func (a *awsTestHelper) prepareConfigurations(isOpenShiftCI bool, kubeClient client.Client) error { +func (a *awsTestHelper) 
prepareConfigurations(isOpenShiftCI bool) error { if isOpenShiftCI { - data, err := rootCredentials(kubeClient, "aws-creds") + data, err := common.RootCredentials("aws-creds") if err != nil { return fmt.Errorf("failed to get AWS credentials: %w", err) } a.keyID = string(data["aws_access_key_id"]) a.secretKey = string(data["aws_secret_access_key"]) } else { - a.keyID = mustGetEnv("AWS_ACCESS_KEY_ID") - a.secretKey = mustGetEnv("AWS_SECRET_ACCESS_KEY") + a.keyID = common.MustGetEnv("AWS_ACCESS_KEY_ID") + a.secretKey = common.MustGetEnv("AWS_SECRET_ACCESS_KEY") } return nil } diff --git a/test/e2e/azure.go b/test/e2e/azure.go index 62025306..6b0c6408 100644 --- a/test/e2e/azure.go +++ b/test/e2e/azure.go @@ -9,6 +9,8 @@ import ( "fmt" "strings" + "github.com/openshift/external-dns-operator/test/common" + "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" @@ -16,7 +18,6 @@ import ( configv1 "github.com/openshift/api/config/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-dns-operator/api/v1alpha1" operatorv1beta1 "github.com/openshift/external-dns-operator/api/v1beta1" @@ -41,10 +42,10 @@ type azureTestHelper struct { // Build the necessary object for the provider test // for Azure Need the credentials ref clusterConfig -func newAzureHelper(kubeClient client.Client) (providerTestHelper, error) { +func newAzureHelper() (providerTestHelper, error) { azureProvider := &azureTestHelper{} - if err := azureProvider.prepareConfigurations(kubeClient); err != nil { + if err := azureProvider.prepareConfigurations(); err != nil { return nil, err } @@ -54,8 +55,8 @@ func newAzureHelper(kubeClient client.Client) (providerTestHelper, error) { return azureProvider, nil } -func (a *azureTestHelper) prepareConfigurations(kubeClient client.Client) (err error) { - data, err := rootCredentials(kubeClient, "azure-credentials") +func (a *azureTestHelper) prepareConfigurations() (err error) { + data, err := common.RootCredentials("azure-credentials") if err != nil { return fmt.Errorf("failed to get credentials secret, error : %v", err) } diff --git a/test/e2e/gcp.go b/test/e2e/gcp.go index 6eec456b..994ef0ca 100644 --- a/test/e2e/gcp.go +++ b/test/e2e/gcp.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" + "github.com/openshift/external-dns-operator/test/common" + configv1 "github.com/openshift/api/config/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -16,7 +18,6 @@ import ( "google.golang.org/api/dns/v1" "google.golang.org/api/option" - "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-dns-operator/api/v1alpha1" operatorv1beta1 "github.com/openshift/external-dns-operator/api/v1beta1" @@ -28,9 +29,9 @@ type gcpTestHelper struct { gcpProjectId string } -func newGCPHelper(isOpenShiftCI bool, kubeClient client.Client) (providerTestHelper, error) { +func newGCPHelper(isOpenShiftCI bool) (providerTestHelper, error) { provider := &gcpTestHelper{} - err := provider.prepareConfigurations(isOpenShiftCI, kubeClient) + err := provider.prepareConfigurations(isOpenShiftCI) if err != nil { return nil, err } @@ -168,27 +169,27 @@ func (g *gcpTestHelper) deleteHostedZone(zoneID, zoneDomain string) error { return nil } -func (a *gcpTestHelper) 
prepareConfigurations(openshiftCI bool, kubeClient client.Client) error { +func (a *gcpTestHelper) prepareConfigurations(openshiftCI bool) error { if openshiftCI { - data, err := rootCredentials(kubeClient, "gcp-credentials") + data, err := common.RootCredentials("gcp-credentials") if err != nil { return fmt.Errorf("failed to get GCP credentials: %w", err) } a.gcpCredentials = string(data["service_account.json"]) - a.gcpProjectId, err = getGCPProjectId(kubeClient) + a.gcpProjectId, err = getGCPProjectId() if err != nil { return fmt.Errorf("failed to get GCP project id: %w", err) } } else { - a.gcpCredentials = mustGetEnv("GCP_CREDENTIALS") - a.gcpProjectId = mustGetEnv("GCP_PROJECT_ID") + a.gcpCredentials = common.MustGetEnv("GCP_CREDENTIALS") + a.gcpProjectId = common.MustGetEnv("GCP_PROJECT_ID") } return nil } -func getGCPProjectId(kubeClient client.Client) (string, error) { +func getGCPProjectId() (string, error) { infraConfig := &configv1.Infrastructure{} - err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "cluster"}, infraConfig) + err := common.KubeClient.Get(context.Background(), types.NamespacedName{Name: "cluster"}, infraConfig) if err != nil { return "", err } diff --git a/test/e2e/infoblox.go b/test/e2e/infoblox.go index 41b3c2e6..4ce58a55 100644 --- a/test/e2e/infoblox.go +++ b/test/e2e/infoblox.go @@ -11,6 +11,8 @@ import ( "strconv" "strings" + "github.com/openshift/external-dns-operator/test/common" + ibclient "github.com/infobloxopen/infoblox-go-client" olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -52,10 +54,10 @@ type infobloxTestHelper struct { gridMasterHostname string } -func newInfobloxHelper(kubeClient client.Client) (*infobloxTestHelper, error) { +func newInfobloxHelper() (*infobloxTestHelper, error) { helper := &infobloxTestHelper{} - if err := helper.prepareConfigurations(kubeClient); err != nil { + if err := helper.prepareConfigurations(); err != nil { return nil, fmt.Errorf("failed to prepare infoblox helper: %w", err) } @@ -190,7 +192,7 @@ func (h *infobloxTestHelper) buildOpenShiftExternalDNSV1Alpha1(name, zoneID, zon return resource } -func (h *infobloxTestHelper) prepareConfigurations(kubeClient client.Client) error { +func (h *infobloxTestHelper) prepareConfigurations() error { configDir := os.Getenv(infobloxGridConfigDirEnvVar) if configDir != "" { host, err := os.ReadFile(configDir + "/" + defaultHostFilename) @@ -218,9 +220,9 @@ func (h *infobloxTestHelper) prepareConfigurations(kubeClient client.Client) err h.wapiPassword = string(password) h.gridMasterHostname = string(masterHostname) } else { - h.gridHost = mustGetEnv(infobloxGridHostEnvVar) - h.wapiUsername = mustGetEnv(infobloxWAPIUsernameEnvVar) - h.wapiPassword = mustGetEnv(infobloxWAPIPasswordEnvVar) + h.gridHost = common.MustGetEnv(infobloxGridHostEnvVar) + h.wapiUsername = common.MustGetEnv(infobloxWAPIUsernameEnvVar) + h.wapiPassword = common.MustGetEnv(infobloxWAPIPasswordEnvVar) h.gridMasterHostname = os.Getenv(infobloxGridMasterHostnameEnvVar) if h.gridMasterHostname == "" { // assume that grid host is a resolvable DNS name @@ -230,11 +232,11 @@ func (h *infobloxTestHelper) prepareConfigurations(kubeClient client.Client) err // TODO: only needed while we are using the temporary setup of Infoblox. // Must be removed once the setup is permanent and has the right certificate. 
- return h.trustGridTLSCert(kubeClient) + return h.trustGridTLSCert() } // trustGridTLSCert instructs the operator to trust Grid Master's self signed TLS certificate. -func (h *infobloxTestHelper) trustGridTLSCert(kubeClient client.Client) error { +func (h *infobloxTestHelper) trustGridTLSCert() error { // get Grid's TLS certificate as raw PEM encoded data certRaw, err := readServerTLSCert(net.JoinHostPort(h.gridHost, defaultWAPIPort), true) if err != nil { @@ -254,7 +256,7 @@ func (h *infobloxTestHelper) trustGridTLSCert(kubeClient client.Client) error { "ca-bundle.crt": string(certRaw), }, } - if err := kubeClient.Create(context.TODO(), trustedCAConfigMap); err != nil { + if err := common.KubeClient.Create(context.TODO(), trustedCAConfigMap); err != nil { return fmt.Errorf("failed to create trusted CA configmap %s/%s: %w", trustedCAConfigMap.Namespace, trustedCAConfigMap.Name, err) } @@ -267,7 +269,7 @@ func (h *infobloxTestHelper) trustGridTLSCert(kubeClient client.Client) error { // inject into subscription if there is one findOperatorSubscription := func() (*olmv1alpha1.Subscription, error) { list := &olmv1alpha1.SubscriptionList{} - if err := kubeClient.List(context.TODO(), list, client.InNamespace(operatorNs)); err != nil { + if err := common.KubeClient.List(context.TODO(), list, client.InNamespace(operatorNs)); err != nil { return nil, err } for _, sub := range list.Items { @@ -291,7 +293,7 @@ func (h *infobloxTestHelper) trustGridTLSCert(kubeClient client.Client) error { subscription.Spec.Config = &olmv1alpha1.SubscriptionConfig{} } subscription.Spec.Config.Env = ensureEnvVar(subscription.Spec.Config.Env, trustedCAEnvVar) - if err := kubeClient.Update(context.TODO(), subscription); err != nil { + if err := common.KubeClient.Update(context.TODO(), subscription); err != nil { return fmt.Errorf("failed to inject trusted CA environment variable into the subscription: %w", err) } return nil @@ -300,7 +302,7 @@ func (h *infobloxTestHelper) trustGridTLSCert(kubeClient client.Client) error { // no subscription was found, try to update the deployment directly findOperatorDeployment := func() (*appsv1.Deployment, error) { list := &appsv1.DeploymentList{} - if err := kubeClient.List(context.TODO(), list, client.InNamespace(operatorNs)); err != nil { + if err := common.KubeClient.List(context.TODO(), list, client.InNamespace(operatorNs)); err != nil { return nil, err } for _, depl := range list.Items { @@ -324,7 +326,7 @@ func (h *infobloxTestHelper) trustGridTLSCert(kubeClient client.Client) error { break } } - if err := kubeClient.Update(context.TODO(), deployment); err != nil { + if err := common.KubeClient.Update(context.TODO(), deployment); err != nil { return fmt.Errorf("failed to inject trusted CA environment variable into the deployment: %w", err) } diff --git a/test/e2e/operator_test.go b/test/e2e/operator_test.go index 81d33f53..cbd74efd 100644 --- a/test/e2e/operator_test.go +++ b/test/e2e/operator_test.go @@ -12,101 +12,46 @@ import ( "testing" "time" - operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/external-dns-operator/test/common" + + configv1 "github.com/openshift/api/config/v1" routev1 "github.com/openshift/api/route/v1" + + "github.com/openshift/external-dns-operator/pkg/version" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" 
"k8s.io/apimachinery/pkg/util/wait" - kscheme "k8s.io/client-go/kubernetes/scheme" - - configv1 "github.com/openshift/api/config/v1" - olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - - operatorv1alpha1 "github.com/openshift/external-dns-operator/api/v1alpha1" - operatorv1beta1 "github.com/openshift/external-dns-operator/api/v1beta1" - "github.com/openshift/external-dns-operator/pkg/version" ) const ( - baseZoneDomain = "example-test.info" - testNamespace = "external-dns-test" - testServiceName = "test-service" - testRouteName = "test-route" - testExtDNSName = "test-extdns" - operandNamespace = "external-dns" - operatorNamespace = "external-dns-operator" - rbacRsrcName = "external-dns-operator" - operatorServiceAccount = "external-dns-operator" - dnsPollingInterval = 15 * time.Second - dnsPollingTimeout = 3 * time.Minute - googleDNSServer = "8.8.8.8" - infobloxDNSProvider = "INFOBLOX" - dnsProviderEnvVar = "DNS_PROVIDER" - e2eSkipDNSProvidersEnvVar = "E2E_SKIP_DNS_PROVIDERS" - e2eSeparateOperandNsEnvVar = "E2E_SEPARATE_OPERAND_NAMESPACE" + baseZoneDomain = "example-test.info" + testNamespace = "external-dns-test" + testServiceName = "test-service" + testRouteName = "test-route" + testExtDNSName = "test-extdns" ) var ( - kubeClient client.Client - scheme *runtime.Scheme nameServers []string hostedZoneID string helper providerTestHelper hostedZoneDomain = baseZoneDomain ) -func init() { - scheme = kscheme.Scheme - if err := configv1.Install(scheme); err != nil { - panic(err) - } - if err := operatorv1alpha1.AddToScheme(scheme); err != nil { - panic(err) - } - if err := operatorv1beta1.AddToScheme(scheme); err != nil { - panic(err) - } - if err := operatorv1.Install(scheme); err != nil { - panic(err) - } - if err := routev1.Install(scheme); err != nil { - panic(err) - } - if err := olmv1alpha1.AddToScheme(scheme); err != nil { - panic(err) - } -} - -func initKubeClient() error { - kubeConfig, err := config.GetConfig() - if err != nil { - return fmt.Errorf("failed to get kube config: %w", err) - } - - kubeClient, err = client.New(kubeConfig, client.Options{}) - if err != nil { - return fmt.Errorf("failed to create kube client: %w", err) - } - return nil -} - func initProviderHelper(openshiftCI bool, platformType string) (providerTestHelper, error) { switch platformType { case string(configv1.AWSPlatformType): - return newAWSHelper(openshiftCI, kubeClient) + return newAWSHelper(openshiftCI) case string(configv1.AzurePlatformType): - return newAzureHelper(kubeClient) + return newAzureHelper() case string(configv1.GCPPlatformType): - return newGCPHelper(openshiftCI, kubeClient) + return newGCPHelper(openshiftCI) case infobloxDNSProvider: - return newInfobloxHelper(kubeClient) + return newInfobloxHelper() default: return nil, fmt.Errorf("unsupported provider: %q", platformType) } @@ -118,33 +63,17 @@ func TestMain(m *testing.M) { platformType string openshiftCI bool ) - if err = initKubeClient(); err != nil { - fmt.Printf("Failed to init kube client: %v\n", err) - os.Exit(1) - } - if os.Getenv("OPENSHIFT_CI") != "" { - openshiftCI = true - if dnsProvider := os.Getenv(dnsProviderEnvVar); dnsProvider != "" { - platformType = dnsProvider - } else { - platformType, err = getPlatformType(kubeClient) - if err != nil { - fmt.Printf("Failed to determine platform type: %v\n", err) - os.Exit(1) - } - } - } else 
{ - platformType = mustGetEnv(dnsProviderEnvVar) + openshiftCI = common.IsOpenShift() + platformType, err = common.GetPlatformType(openshiftCI) + if err != nil { + fmt.Printf("Failed to determine platform type: %v\n", err) + os.Exit(1) } - if providersToSkip := os.Getenv(e2eSkipDNSProvidersEnvVar); len(providersToSkip) > 0 { - for _, provider := range strings.Split(providersToSkip, ",") { - if strings.EqualFold(provider, platformType) { - fmt.Printf("Skipping e2e test for the provider %q!\n", provider) - os.Exit(0) - } - } + if common.SkipProvider(platformType) { + fmt.Printf("Skipping e2e test for the provider %q!\n", platformType) + os.Exit(0) } if version.SHORTCOMMIT != "" { @@ -163,8 +92,9 @@ func TestMain(m *testing.M) { os.Exit(1) } - if err := ensureOperandResources(); err != nil { + if err = common.EnsureOperandResources(context.TODO()); err != nil { fmt.Printf("Failed to ensure operand resources: %v\n", err) + os.Exit(1) } exitStatus := m.Run() @@ -182,14 +112,14 @@ func TestOperatorAvailable(t *testing.T) { expected := []appsv1.DeploymentCondition{ {Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue}, } - if err := waitForOperatorDeploymentStatusCondition(context.TODO(), t, kubeClient, expected...); err != nil { + if err := waitForOperatorDeploymentStatusCondition(context.TODO(), t, common.KubeClient, expected...); err != nil { t.Errorf("Did not get expected available condition: %v", err) } } func TestExternalDNSWithRoute(t *testing.T) { t.Log("Ensuring test namespace") - err := kubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) + err := common.KubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("Failed to ensure namespace %s: %v", testNamespace, err) } @@ -197,38 +127,38 @@ func TestExternalDNSWithRoute(t *testing.T) { // secret is needed only for DNS providers which cannot get their credentials from CCO // namely Infobox, BlueCat t.Log("Creating credentials secret") - credSecret := helper.makeCredentialsSecret(operatorNamespace) - err = kubeClient.Create(context.TODO(), credSecret) + credSecret := helper.makeCredentialsSecret(common.OperatorNamespace) + err = common.KubeClient.Create(context.TODO(), credSecret) if err != nil { t.Fatalf("Failed to create credentials secret %s/%s: %v", credSecret.Namespace, credSecret.Name, err) } t.Log("Creating external dns instance with source type route") extDNS := helper.buildOpenShiftExternalDNS(testExtDNSName, hostedZoneID, hostedZoneDomain, "", credSecret) - if err := kubeClient.Create(context.TODO(), &extDNS); err != nil { + if err := common.KubeClient.Create(context.TODO(), &extDNS); err != nil { t.Fatalf("Failed to create external DNS %q: %v", testExtDNSName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), &extDNS) + _ = common.KubeClient.Delete(context.TODO(), &extDNS) }() // create a route with the annotation targeted by the ExternalDNS resource t.Log("Creating source route") testRouteHost := "myroute." 
+ hostedZoneDomain route := testRoute(testRouteName, testNamespace, testRouteHost, testServiceName) - if err := kubeClient.Create(context.TODO(), route); err != nil { + if err := common.KubeClient.Create(context.TODO(), route); err != nil { t.Fatalf("Failed to create test route %s/%s: %v", testNamespace, testRouteName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), route) + _ = common.KubeClient.Delete(context.TODO(), route) }() t.Logf("Created Route Host is %v", testRouteHost) // get the router canonical name var targetRoute routev1.Route - if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { t.Log("Waiting for the route to be acknowledged by the router") - err = kubeClient.Get(ctx, types.NamespacedName{ + err = common.KubeClient.Get(ctx, types.NamespacedName{ Namespace: testNamespace, Name: testRouteName, }, &targetRoute) @@ -258,7 +188,7 @@ func TestExternalDNSWithRoute(t *testing.T) { t.Logf("Looking for DNS record in nameserver: %s", nameSrv) // verify dns records has been created for the route host. - if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { cNameHost, err := lookupCNAME(testRouteHost, nameSrv) if err != nil { t.Logf("Waiting for DNS record: %s, error: %v", testRouteHost, err) @@ -280,77 +210,41 @@ func TestExternalDNSWithRoute(t *testing.T) { func TestExternalDNSRecordLifecycle(t *testing.T) { t.Log("Ensuring test namespace") - err := kubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) + err := common.KubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("Failed to ensure namespace %s: %v", testNamespace, err) } t.Log("Creating credentials secret") - credSecret := helper.makeCredentialsSecret(operatorNamespace) - err = kubeClient.Create(context.TODO(), credSecret) + credSecret := helper.makeCredentialsSecret(common.OperatorNamespace) + err = common.KubeClient.Create(context.TODO(), credSecret) if err != nil { t.Fatalf("Failed to create credentials secret %s/%s: %v", credSecret.Namespace, credSecret.Name, err) } t.Log("Creating external dns instance") extDNS := helper.buildExternalDNS(testExtDNSName, hostedZoneID, hostedZoneDomain, credSecret) - if err := kubeClient.Create(context.TODO(), &extDNS); err != nil { + if err := common.KubeClient.Create(context.TODO(), &extDNS); err != nil { t.Fatalf("Failed to create external DNS %q: %v", testExtDNSName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), &extDNS) + _ = common.KubeClient.Delete(context.TODO(), &extDNS) }() // create a service of type LoadBalancer with the annotation targeted by the ExternalDNS resource t.Log("Creating source service") - service := defaultService(testServiceName, testNamespace) - if err := kubeClient.Create(context.TODO(), service); err != nil { + service := common.DefaultService(testServiceName, testNamespace) + if err := common.KubeClient.Create(context.TODO(), service); err != nil { 
t.Fatalf("Failed to create test service %s/%s: %v", testNamespace, testServiceName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), service) + _ = common.KubeClient.Delete(context.TODO(), service) }() - serviceIPs := make(map[string]struct{}) - // get the IPs of the loadbalancer which is created for the service - if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { - t.Log("Getting IPs of service's load balancer") - var service corev1.Service - err = kubeClient.Get(ctx, types.NamespacedName{ - Namespace: testNamespace, - Name: testServiceName, - }, &service) - if err != nil { - return false, err - } - - // if there is no associated loadbalancer then retry later - if len(service.Status.LoadBalancer.Ingress) < 1 { - return false, nil - } - - // get the IPs of the loadbalancer - if service.Status.LoadBalancer.Ingress[0].IP != "" { - serviceIPs[service.Status.LoadBalancer.Ingress[0].IP] = struct{}{} - } else if service.Status.LoadBalancer.Ingress[0].Hostname != "" { - lbHostname := service.Status.LoadBalancer.Ingress[0].Hostname - ips, err := lookupARecord(lbHostname, googleDNSServer) - if err != nil { - t.Logf("Waiting for IP of loadbalancer %s", lbHostname) - // if the hostname cannot be resolved currently then retry later - return false, nil - } - for _, ip := range ips { - serviceIPs[ip] = struct{}{} - } - } else { - t.Logf("Waiting for loadbalancer details for service %s", testServiceName) - return false, nil - } - t.Logf("Loadbalancer's IP(s): %v", serviceIPs) - return true, nil - }); err != nil { - t.Fatalf("Failed to get loadbalancer IPs for service %s/%s: %v", testNamespace, testServiceName, err) + // Get the resolved service IPs of the load balancer + _, serviceIPs, err := common.GetServiceIPs(context.TODO(), t, common.DnsPollingTimeout, types.NamespacedName{Name: testServiceName, Namespace: testNamespace}) + if err != nil { + t.Fatalf("failed to get service IPs %s/%s: %v", testNamespace, testServiceName, err) } // try all nameservers and fail only if all failed @@ -358,9 +252,9 @@ func TestExternalDNSRecordLifecycle(t *testing.T) { t.Logf("Looking for DNS record in nameserver: %s", nameSrv) // verify that the IPs of the record created by ExternalDNS match the IPs of loadbalancer obtained in the previous step. 
- if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { expectedHost := fmt.Sprintf("%s.%s", testServiceName, hostedZoneDomain) - ips, err := lookupARecord(expectedHost, nameSrv) + ips, err := common.LookupARecord(expectedHost, nameSrv) if err != nil { t.Logf("Waiting for dns record: %s", expectedHost) return false, nil @@ -398,7 +292,7 @@ func TestExternalDNSRecordLifecycle(t *testing.T) { func TestExternalDNSCustomIngress(t *testing.T) { testIngressNamespace := "test-extdns-openshift-route" t.Logf("Ensuring test namespace %s", testIngressNamespace) - err := kubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testIngressNamespace}}) + err := common.KubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testIngressNamespace}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("Failed to ensure namespace %s: %v", testIngressNamespace, err) } @@ -409,18 +303,18 @@ func TestExternalDNSCustomIngress(t *testing.T) { ingDomain := fmt.Sprintf("%s.%s", name.Name, hostedZoneDomain) t.Log("Create custom ingress controller") ing := newHostNetworkController(name, ingDomain) - if err = kubeClient.Create(context.TODO(), ing); err != nil && !errors.IsAlreadyExists(err) { + if err = common.KubeClient.Create(context.TODO(), ing); err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("Failed to create ingresscontroller %s/%s: %v", name.Namespace, name.Name, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), ing) + _ = common.KubeClient.Delete(context.TODO(), ing) }() // secret is needed only for DNS providers which cannot get their credentials from CCO // namely Infobox, BlueCat t.Log("Creating credentials secret") - credSecret := helper.makeCredentialsSecret(operatorNamespace) - err = kubeClient.Create(context.TODO(), credSecret) + credSecret := helper.makeCredentialsSecret(common.OperatorNamespace) + err = common.KubeClient.Create(context.TODO(), credSecret) if err != nil { t.Fatalf("Failed to create credentials secret %s/%s: %v", credSecret.Namespace, credSecret.Name, err) } @@ -428,22 +322,22 @@ func TestExternalDNSCustomIngress(t *testing.T) { externalDnsServiceName := fmt.Sprintf("%s-source-as-openshift-route", testExtDNSName) t.Log("Creating external dns instance") extDNS := helper.buildOpenShiftExternalDNS(externalDnsServiceName, hostedZoneID, hostedZoneDomain, openshiftRouterName, credSecret) - if err = kubeClient.Create(context.TODO(), &extDNS); err != nil && !errors.IsAlreadyExists(err) { + if err = common.KubeClient.Create(context.TODO(), &extDNS); err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("Failed to create external DNS %q: %v", testExtDNSName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), &extDNS) + _ = common.KubeClient.Delete(context.TODO(), &extDNS) }() routeName := types.NamespacedName{Namespace: testIngressNamespace, Name: "external-dns-route"} host := fmt.Sprintf("app.%s", ingDomain) route := testRoute(routeName.Name, routeName.Namespace, host, testServiceName) t.Log("Creating test route") - if err = kubeClient.Create(context.TODO(), route); err != nil { + if err = common.KubeClient.Create(context.TODO(), route); err != nil { t.Fatalf("Failed to create route %s/%s: %v", routeName.Namespace, routeName.Name, err) } 
defer func() { - _ = kubeClient.Delete(context.TODO(), route) + _ = common.KubeClient.Delete(context.TODO(), route) }() canonicalName, err := fetchRouterCanonicalHostname(context.TODO(), t, routeName, ingDomain) @@ -457,7 +351,7 @@ func TestExternalDNSCustomIngress(t *testing.T) { func TestExternalDNSWithRouteV1Alpha1(t *testing.T) { t.Log("Ensuring test namespace") - err := kubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) + err := common.KubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("Failed to ensure namespace %s: %v", testNamespace, err) } @@ -465,38 +359,38 @@ func TestExternalDNSWithRouteV1Alpha1(t *testing.T) { // secret is needed only for DNS providers which cannot get their credentials from CCO // namely Infobox, BlueCat t.Log("Creating credentials secret") - credSecret := helper.makeCredentialsSecret(operatorNamespace) - err = kubeClient.Create(context.TODO(), credSecret) + credSecret := helper.makeCredentialsSecret(common.OperatorNamespace) + err = common.KubeClient.Create(context.TODO(), credSecret) if err != nil { t.Fatalf("Failed to create credentials secret %s/%s: %v", credSecret.Namespace, credSecret.Name, err) } t.Log("Creating external dns instance with source type route") extDNS := helper.buildOpenShiftExternalDNSV1Alpha1(testExtDNSName, hostedZoneID, hostedZoneDomain, "", credSecret) - if err := kubeClient.Create(context.TODO(), &extDNS); err != nil { + if err := common.KubeClient.Create(context.TODO(), &extDNS); err != nil { t.Fatalf("Failed to create external DNS %q: %v", testExtDNSName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), &extDNS) + _ = common.KubeClient.Delete(context.TODO(), &extDNS) }() // create a route with the annotation targeted by the ExternalDNS resource t.Log("Creating source route") testRouteHost := "myroute." + hostedZoneDomain route := testRoute(testRouteName, testNamespace, testRouteHost, testServiceName) - if err := kubeClient.Create(context.TODO(), route); err != nil { + if err := common.KubeClient.Create(context.TODO(), route); err != nil { t.Fatalf("Failed to create test route %s/%s: %v", testNamespace, testRouteName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), route) + _ = common.KubeClient.Delete(context.TODO(), route) }() t.Logf("Created Route Host is %v", testRouteHost) // get the router canonical name var targetRoute routev1.Route - if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { t.Log("Waiting for the route to be acknowledged by the router") - err = kubeClient.Get(ctx, types.NamespacedName{ + err = common.KubeClient.Get(ctx, types.NamespacedName{ Namespace: testNamespace, Name: testRouteName, }, &targetRoute) @@ -526,7 +420,7 @@ func TestExternalDNSWithRouteV1Alpha1(t *testing.T) { t.Logf("Looking for DNS record in nameserver: %s", nameSrv) // verify dns records has been created for the route host. 
- if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { cNameHost, err := lookupCNAME(testRouteHost, nameSrv) if err != nil { t.Logf("Waiting for DNS record: %s, error: %v", testRouteHost, err) @@ -551,77 +445,41 @@ func TestExternalDNSWithRouteV1Alpha1(t *testing.T) { func TestExternalDNSSecretCredentialUpdate(t *testing.T) { t.Log("Ensuring test namespace") testService := fmt.Sprintf("%s-credential-update", testServiceName) - err := kubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) + err := common.KubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("Failed to ensure namespace %s: %v", testNamespace, err) } t.Log("Creating wrong credentials secret") - credSecret := makeWrongCredentialsSecret(operatorNamespace) - err = kubeClient.Create(context.TODO(), credSecret) + credSecret := makeWrongCredentialsSecret(common.OperatorNamespace) + err = common.KubeClient.Create(context.TODO(), credSecret) if err != nil { t.Fatalf("Failed to create credentials secret %s/%s: %v", credSecret.Namespace, credSecret.Name, err) } t.Log("Creating external dns instance") extDNS := helper.buildExternalDNS(testExtDNSName, hostedZoneID, hostedZoneDomain, credSecret) - if err := kubeClient.Create(context.TODO(), &extDNS); err != nil { + if err := common.KubeClient.Create(context.TODO(), &extDNS); err != nil { t.Fatalf("Failed to create external DNS %q: %v", testExtDNSName, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), &extDNS) + _ = common.KubeClient.Delete(context.TODO(), &extDNS) }() // create a service of type LoadBalancer with the annotation targeted by the ExternalDNS resource t.Log("Creating source service") - service := defaultService(testService, testNamespace) - if err := kubeClient.Create(context.TODO(), service); err != nil { + service := common.DefaultService(testService, testNamespace) + if err := common.KubeClient.Create(context.TODO(), service); err != nil { t.Fatalf("Failed to create test service %s/%s: %v", testNamespace, testService, err) } defer func() { - _ = kubeClient.Delete(context.TODO(), service) + _ = common.KubeClient.Delete(context.TODO(), service) }() - serviceIPs := make(map[string]struct{}) - // get the IPs of the loadbalancer which is created for the service - if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { - t.Log("Getting IPs of service's load balancer") - var service corev1.Service - err = kubeClient.Get(ctx, types.NamespacedName{ - Namespace: testNamespace, - Name: testService, - }, &service) - if err != nil { - return false, err - } - - // if there is no associated loadbalancer then retry later - if len(service.Status.LoadBalancer.Ingress) < 1 { - return false, nil - } - - // get the IPs of the loadbalancer - if service.Status.LoadBalancer.Ingress[0].IP != "" { - serviceIPs[service.Status.LoadBalancer.Ingress[0].IP] = struct{}{} - } else if service.Status.LoadBalancer.Ingress[0].Hostname != "" { - lbHostname := service.Status.LoadBalancer.Ingress[0].Hostname - ips, err := lookupARecord(lbHostname, googleDNSServer) - if err != nil { - 
t.Logf("Waiting for IP of loadbalancer %s", lbHostname) - // if the hostname cannot be resolved currently then retry later - return false, nil - } - for _, ip := range ips { - serviceIPs[ip] = struct{}{} - } - } else { - t.Logf("Waiting for loadbalancer details for service %s", testService) - return false, nil - } - t.Logf("Loadbalancer's IP(s): %v", serviceIPs) - return true, nil - }); err != nil { - t.Fatalf("Failed to get loadbalancer IPs for service %s/%s: %v", testNamespace, testService, err) + // Get the resolved service IPs of the load balancer + _, serviceIPs, err := common.GetServiceIPs(context.TODO(), t, common.DnsPollingTimeout, types.NamespacedName{Name: testService, Namespace: testNamespace}) + if err != nil { + t.Fatalf("failed to get service IPs %s/%s: %v", testNamespace, testServiceName, err) } dnsCheck := make(chan bool) @@ -630,9 +488,9 @@ func TestExternalDNSSecretCredentialUpdate(t *testing.T) { for _, nameSrv := range nameServers { t.Logf("Looking for DNS record in nameserver: %s", nameSrv) // verify that the IPs of the record created by ExternalDNS match the IPs of loadbalancer obtained in the previous step. - if err := wait.PollUntilContextTimeout(context.TODO(), dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { expectedHost := fmt.Sprintf("%s.%s", testService, hostedZoneDomain) - ips, err := lookupARecord(expectedHost, nameSrv) + ips, err := common.LookupARecord(expectedHost, nameSrv) if err != nil { t.Logf("Waiting for dns record: %s", expectedHost) return false, nil @@ -645,6 +503,7 @@ func TestExternalDNSSecretCredentialUpdate(t *testing.T) { // If all IPs of the loadbalancer are not present query again. 
if len(gotIPs) < len(serviceIPs) { + t.Logf("Expected %d IPs, but got %d, retrying...", len(serviceIPs), len(gotIPs)) return false, nil } // all expected IPs should be in the received IPs @@ -666,8 +525,8 @@ func TestExternalDNSSecretCredentialUpdate(t *testing.T) { }() t.Logf("Updating credentials secret") - credSecret.Data = helper.makeCredentialsSecret(operatorNamespace).Data - err = kubeClient.Update(context.TODO(), credSecret) + credSecret.Data = helper.makeCredentialsSecret(common.OperatorNamespace).Data + err = common.KubeClient.Update(context.TODO(), credSecret) if err != nil { t.Fatalf("Failed to update credentials secret %s/%s: %v", credSecret.Namespace, credSecret.Name, err) } @@ -680,87 +539,12 @@ func TestExternalDNSSecretCredentialUpdate(t *testing.T) { // HELPER FUNCTIONS -func ensureOperandResources() error { - if os.Getenv(e2eSeparateOperandNsEnvVar) != "true" { - return nil - } - - if err := ensureOperandNamespace(); err != nil { - return fmt.Errorf("Failed to create %s namespace: %v\n", operandNamespace, err) - } - - if err := ensureOperandRole(); err != nil { - return fmt.Errorf("Failed to create role external-dns-operator in ns %s: %v\n", operandNamespace, err) - } - - if err := ensureOperandRoleBinding(); err != nil { - return fmt.Errorf("Failed to create rolebinding external-dns-operator in ns %s: %v\n", operandNamespace, err) - } - - return nil -} - -func ensureOperandNamespace() error { - return kubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: operandNamespace}}) -} - -func ensureOperandRole() error { - rules := []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"secrets", "serviceaccounts", "configmaps"}, - Verbs: []string{"get", "list", "watch", "create", "update", "delete"}, - }, - { - APIGroups: []string{""}, - Resources: []string{"namespaces"}, - Verbs: []string{"get", "list", "watch"}, - }, - { - APIGroups: []string{"apps"}, - Resources: []string{"deployments"}, - Verbs: []string{"get", "list", "watch", "create", "update", "delete"}, - }, - } - - role := rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: rbacRsrcName, - Namespace: operandNamespace, - }, - Rules: rules, - } - return kubeClient.Create(context.TODO(), &role) -} - -func ensureOperandRoleBinding() error { - rb := rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: rbacRsrcName, - Namespace: operandNamespace, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "Role", - Name: rbacRsrcName, - }, - Subjects: []rbacv1.Subject{ - { - Kind: rbacv1.ServiceAccountKind, - Name: operatorServiceAccount, - Namespace: operatorNamespace, - }, - }, - } - return kubeClient.Create(context.TODO(), &rb) -} - func verifyCNAMERecordForOpenshiftRoute(ctx context.Context, t *testing.T, canonicalName, host string) { // try all nameservers and fail only if all failed recordExist := false for _, nameSrv := range nameServers { t.Logf("Looking for cname record in nameserver: %s", nameSrv) - if err := wait.PollUntilContextTimeout(ctx, dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(ctx, common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { cname, err := lookupCNAME(host, nameSrv) if err != nil { t.Logf("Cname lookup failed for nameserver: %s , error: %v", nameSrv, err) @@ -784,8 +568,8 @@ func verifyCNAMERecordForOpenshiftRoute(ctx context.Context, t *testing.T, canon func 
fetchRouterCanonicalHostname(ctx context.Context, t *testing.T, routeName types.NamespacedName, routerDomain string) (string, error) { route := routev1.Route{} canonicalName := "" - if err := wait.PollUntilContextTimeout(ctx, dnsPollingInterval, dnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { - err = kubeClient.Get(ctx, types.NamespacedName{ + if err := wait.PollUntilContextTimeout(ctx, common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + err = common.KubeClient.Get(ctx, types.NamespacedName{ Namespace: routeName.Namespace, Name: routeName.Name, }, &route) diff --git a/test/e2e/util.go b/test/e2e/util.go index 555cbb9d..4d73d4c2 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -13,7 +13,6 @@ import ( "io" "math/rand" "net/http" - "os" "reflect" "testing" "time" @@ -22,14 +21,12 @@ import ( "github.com/miekg/dns" "sigs.k8s.io/controller-runtime/pkg/client" - configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/pointer" @@ -38,6 +35,10 @@ import ( "github.com/openshift/external-dns-operator/pkg/utils" ) +const ( + infobloxDNSProvider = "INFOBLOX" +) + type providerTestHelper interface { ensureHostedZone(string) (string, []string, error) deleteHostedZone(string, string) error @@ -57,19 +58,6 @@ func randomString(n int) string { return string(str) } -func getPlatformType(kubeClient client.Client) (string, error) { - var infraConfig configv1.Infrastructure - err := kubeClient.Get(context.Background(), types.NamespacedName{Name: "cluster"}, &infraConfig) - if err != nil { - return "", err - } - return string(infraConfig.Status.PlatformStatus.Type), nil -} - -func defaultService(name, namespace string) *corev1.Service { - return testService(name, namespace, corev1.ServiceTypeLoadBalancer) -} - func testRoute(name, namespace, host, svcName string) *routev1.Route { return &routev1.Route{ ObjectMeta: metav1.ObjectMeta{ @@ -88,42 +76,6 @@ func testRoute(name, namespace, host, svcName string) *routev1.Route { } } -func testService(name, namespace string, svcType corev1.ServiceType) *corev1.Service { - return &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "external-dns.mydomain.org/publish": "yes", - }, - }, - Spec: corev1.ServiceSpec{ - Selector: map[string]string{ - "name": "hello-openshift", - }, - Type: svcType, - Ports: []corev1.ServicePort{ - { - Protocol: corev1.ProtocolTCP, - Port: 80, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 8080, - }, - }, - }, - }, - } -} - -func mustGetEnv(name string) string { - val := os.Getenv(name) - if val == "" { - panic(fmt.Sprintf("environment variable %s must be set", name)) - } - return val -} - func deploymentConditionMap(conditions ...appsv1.DeploymentCondition) map[string]string { conds := map[string]string{} for _, cond := range conditions { @@ -238,18 +190,6 @@ func routeExternalDNSV1Alpha1(name, zoneID, zoneDomain, routerName string) opera return extDns } -func rootCredentials(kubeClient client.Client, name string) (map[string][]byte, error) { - secret := &corev1.Secret{} - secretName := types.NamespacedName{ - Name: name, - 
Namespace: "kube-system", - } - if err := kubeClient.Get(context.TODO(), secretName, secret); err != nil { - return nil, fmt.Errorf("failed to get credentials secret %s: %w", secretName.Name, err) - } - return secret.Data, nil -} - // lookupCNAME retrieves the first canonical name of the given host. // This function is different from net.LookupCNAME. // net.LookupCNAME assumes the nameserver used is the recursive resolver (https://github.com/golang/go/blob/master/src/net/dnsclient_unix.go#L637). @@ -277,29 +217,6 @@ func lookupCNAME(host, server string) (string, error) { return cname.Target, nil } -func lookupARecord(host, server string) ([]string, error) { - dnsClient := &dns.Client{} - message := &dns.Msg{} - message.SetQuestion(dns.Fqdn(host), dns.TypeA) - response, _, err := dnsClient.Exchange(message, fmt.Sprintf("%s:53", server)) - if err != nil { - return nil, err - } - if len(response.Answer) == 0 { - return nil, fmt.Errorf("not found") - } - var ips []string - for _, ans := range response.Answer { - if aRec, ok := ans.(*dns.A); ok { - ips = append(ips, aRec.A.String()) - } - } - if len(ips) == 0 { - return nil, fmt.Errorf("not found") - } - return ips, nil -} - func equalFQDN(name1, name2 string) bool { return dns.Fqdn(name1) == dns.Fqdn(name2) } diff --git a/test/e2e_awssharedvpc/aws_shared_vpc_test.go b/test/e2e_awssharedvpc/aws_shared_vpc_test.go new file mode 100644 index 00000000..991c6ce6 --- /dev/null +++ b/test/e2e_awssharedvpc/aws_shared_vpc_test.go @@ -0,0 +1,291 @@ +//go:build e2e +// +build e2e + +package e2e_awssharedvpc + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/openshift/external-dns-operator/pkg/utils" + "github.com/openshift/external-dns-operator/test/common" + + configv1 "github.com/openshift/api/config/v1" + + operatorv1beta1 "github.com/openshift/external-dns-operator/api/v1beta1" +) + +const ( + testNamespace = "external-dns-test" + testServiceName = "test-service" + testExtDNSName = "test-extdns" +) + +var ( + r53ClientAssumeRole *route53.Route53 + roleARN string + hostedZoneID string + hostedZoneDomain string +) + +func TestMain(m *testing.M) { + var ( + err error + platformType string + openshiftCI bool + ) + + openshiftCI = common.IsOpenShift() + platformType, err = common.GetPlatformType(openshiftCI) + if err != nil { + fmt.Printf("Failed to determine platform type: %v\n", err) + os.Exit(1) + } + + if common.SkipProvider(platformType) { + fmt.Printf("Skipping e2e test for the provider %q!\n", platformType) + os.Exit(0) + } + + // Only run this test if the DNS config contains the privateZoneIAMRole which indicates it's a "Shared VPC" cluster. + // Note: Only AWS supports privateZoneIAMRole. 
+ dnsConfig := configv1.DNS{} + err = common.KubeClient.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, &dnsConfig) + if err != nil { + fmt.Printf("Failed to get dns 'cluster': %v\n", err) + os.Exit(1) + } + if dnsConfig.Spec.Platform.AWS == nil || dnsConfig.Spec.Platform.AWS.PrivateZoneIAMRole == "" { + fmt.Printf("Test skipped on non-shared-VPC cluster\n") + os.Exit(0) + } + roleARN = dnsConfig.Spec.Platform.AWS.PrivateZoneIAMRole + hostedZoneID = dnsConfig.Spec.PrivateZone.ID + hostedZoneDomain = dnsConfig.Spec.BaseDomain + + if r53ClientAssumeRole, err = initRoute53ClientAssumeRole(openshiftCI, roleARN); err != nil { + fmt.Printf("Failed to initialize Route 53 Client: %v\n", err) + os.Exit(1) + } + + if err = common.EnsureOperandResources(context.TODO()); err != nil { + fmt.Printf("Failed to ensure operand resources: %v\n", err) + os.Exit(1) + } + + os.Exit(m.Run()) +} + +// TestExternalDNSAssumeRole tests the assumeRole functionality in which you can specify a Role ARN to use another +// account's hosted zone for creating DNS records. Only AWS is supported. +func TestExternalDNSAssumeRole(t *testing.T) { + t.Log("Ensuring test namespace") + err := common.KubeClient.Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) + if err != nil && !errors.IsAlreadyExists(err) { + t.Fatalf("Failed to ensure namespace %s: %v", testNamespace, err) + } + + // Create an External object that uses the role ARN in the dns config to create DNS records in the private DNS + // zone in another AWS account route 53. + t.Log("Creating ExternalDNS object that assumes role of private zone in another account's route 53") + extDNS := buildExternalDNSAssumeRole(testExtDNSName, hostedZoneID, hostedZoneDomain, roleARN) + if err := common.KubeClient.Create(context.TODO(), extDNS); err != nil { + t.Fatalf("Failed to create external DNS %q: %v", testExtDNSName, err) + } + defer func() { + _ = common.KubeClient.Delete(context.TODO(), extDNS) + }() + + // Create a service of type LoadBalancer with the annotation targeted by the ExternalDNS resource. + t.Log("Creating source service") + service := common.DefaultService(testServiceName, testNamespace) + if err := common.KubeClient.Create(context.TODO(), service); err != nil { + t.Fatalf("Failed to create test service %s/%s: %v", testNamespace, testServiceName, err) + } + defer func() { + _ = common.KubeClient.Delete(context.TODO(), service) + }() + + // Get the service address (hostname or IP) and the resolved service IPs of the load balancer. + serviceAddress, serviceIPs, err := common.GetServiceIPs(context.TODO(), t, common.DnsPollingTimeout, types.NamespacedName{Name: testServiceName, Namespace: testNamespace}) + if err != nil { + t.Fatalf("failed to get service IPs %s/%s: %v", testNamespace, testServiceName, err) + } + + // Query Route 53 API with assume role ARN from the dns config, then compare the results to ensure it matches the + // service hostname. 
+ t.Logf("Querying Route 53 API to confirm DNS record exists in a different AWS account") + expectedHost := fmt.Sprintf("%s.%s", testServiceName, hostedZoneDomain) + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + recordValues, err := getResourceRecordValues(hostedZoneID, expectedHost, "A") + if err != nil { + t.Logf("Failed to get DNS record for shared VPC zone: %v", err) + return false, nil + } else if len(recordValues) == 0 { + t.Logf("No DNS records with name %q", expectedHost) + return false, nil + } + + if _, found := recordValues[serviceAddress]; !found { + if _, foundWithDot := recordValues[serviceAddress+"."]; !foundWithDot { + t.Logf("DNS record with name %q didn't contain expected service IP %q", expectedHost, serviceAddress) + return false, nil + } + } + t.Logf("DNS record with name %q found in shared Route 53 private zone %q and matched service IPs", expectedHost, hostedZoneID) + return true, nil + }); err != nil { + t.Fatal("Failed to verify that DNS has been correctly set in a different account.") + } + + t.Log("Querying for DNS record inside cluster VPC using a dig pod") + + // Verify that the IPs of the record created by ExternalDNS match the IPs of load balancer obtained in the previous + // step. We will start a pod that runs a dig command for the expected hostname and parse the dig output. + if err := wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + gotIPs, err := common.LookupARecordInternal(ctx, t, testNamespace, expectedHost) + if err != nil { + t.Logf("Failed to query hostname inside cluster: %v", err) + return false, nil + } + + // If all IPs of the loadbalancer are not present, query again. + if len(gotIPs) == 0 { + t.Log("Failed to resolve any IPs for the DNS record, retrying...") + return false, nil + } + if len(gotIPs) < len(serviceIPs) { + t.Logf("Expected %d IPs, but got %d, retrying...", len(serviceIPs), len(gotIPs)) + return false, nil + } + // All expected IPs should be in the received IPs, + // but these 2 sets are not necessary equal. + for ip := range serviceIPs { + if _, found := gotIPs[ip]; !found { + return false, nil + } + } + t.Log("Expected IPs are equal to IPs resolved.") + return true, nil + }); err != nil { + t.Fatalf("Failed to verify that DNS has been correctly set: %v", err) + } + + // Delete the service and make sure ExternalDNS cleans up the DNS Records + t.Log("Deleting service and verifying ExternalDNS deletes DNS records.") + if err = common.KubeClient.Delete(context.TODO(), service); err != nil { + t.Fatalf("Error deleting service %s/%s: %v", service.Namespace, service.Name, err) + } + if err = wait.PollUntilContextTimeout(context.TODO(), common.DnsPollingInterval, common.DnsPollingTimeout, true, func(ctx context.Context) (done bool, err error) { + recordValues, err := getResourceRecordValues(hostedZoneID, expectedHost, "A") + if len(recordValues) != 0 { + t.Logf("Waiting for DNS record %q to be deleted.", expectedHost) + return false, nil + } + return true, nil + }); err != nil { + t.Fatalf("Failed to verify that ExternalDNS deleted DNS records: %v", err) + } +} + +// buildExternalDNSAssumeRole builds a ExternalDNS object for a shared VPC cluster test. 
+func buildExternalDNSAssumeRole(name, zoneID, zoneDomain, roleArn string) *operatorv1beta1.ExternalDNS { + return &operatorv1beta1.ExternalDNS{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: operatorv1beta1.ExternalDNSSpec{ + Zones: []string{zoneID}, + Source: operatorv1beta1.ExternalDNSSource{ + ExternalDNSSourceUnion: operatorv1beta1.ExternalDNSSourceUnion{ + Type: operatorv1beta1.SourceTypeService, + Service: &operatorv1beta1.ExternalDNSServiceSourceOptions{ + ServiceType: []corev1.ServiceType{ + corev1.ServiceTypeLoadBalancer, + corev1.ServiceTypeClusterIP, + }, + }, + LabelFilter: utils.MustParseLabelSelector("external-dns.mydomain.org/publish=yes"), + }, + HostnameAnnotationPolicy: operatorv1beta1.HostnameAnnotationPolicyIgnore, + FQDNTemplate: []string{fmt.Sprintf("{{.Name}}.%s", zoneDomain)}, + }, + Provider: operatorv1beta1.ExternalDNSProvider{ + Type: operatorv1beta1.ProviderTypeAWS, + AWS: &operatorv1beta1.ExternalDNSAWSProviderOptions{ + AssumeRole: &operatorv1beta1.ExternalDNSAWSAssumeRoleOptions{ + ARN: roleArn, + }, + }, + }, + }, + } +} + +// initRoute53ClientAssumeRole initializes a Route 53 client with an assumed role. +func initRoute53ClientAssumeRole(isOpenShiftCI bool, roleARN string) (*route53.Route53, error) { + var keyID, secretKey string + if isOpenShiftCI { + data, err := common.RootCredentials("aws-creds") + if err != nil { + return nil, fmt.Errorf("failed to get AWS credentials: %w", err) + } + keyID = string(data["aws_access_key_id"]) + secretKey = string(data["aws_secret_access_key"]) + } else { + keyID = common.MustGetEnv("AWS_ACCESS_KEY_ID") + secretKey = common.MustGetEnv("AWS_SECRET_ACCESS_KEY") + } + + awsSession := session.Must(session.NewSession(&aws.Config{ + Credentials: credentials.NewStaticCredentials(keyID, secretKey, ""), + })) + + r53AssumeRoleClient := route53.New(awsSession) + r53AssumeRoleClient.Config.WithCredentials(stscreds.NewCredentials(awsSession, roleARN)) + + return r53AssumeRoleClient, nil +} + +// getResourceRecordValues gets the values (target address/IPs) of the DNS resource record associated with the provided +// zoneId, recordName, and recordType. If the record is an alias resource record, it will return the target DNS name. +// Otherwise, it will return the target IP address(es). The return type, map[string]struct{}, provides a convenient type +// for existence checking. +func getResourceRecordValues(zoneId, recordName, recordType string) (map[string]struct{}, error) { + records, err := r53ClientAssumeRole.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{ + HostedZoneId: &zoneId, + StartRecordName: &recordName, + StartRecordType: &recordType, + }) + if err != nil { + return nil, fmt.Errorf("failed to list resource record sets: %w", err) + } + + if len(records.ResourceRecordSets) == 0 { + return nil, nil + } + + recordList := make(map[string]struct{}) + if records.ResourceRecordSets[0].AliasTarget != nil { + recordList[*records.ResourceRecordSets[0].AliasTarget.DNSName] = struct{}{} + } else { + for _, record := range records.ResourceRecordSets[0].ResourceRecords { + recordList[*record.Value] = struct{}{} + } + } + + return recordList, nil +} diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/apiserver/pkg/storage/names/generate.go b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go new file mode 100644 index 00000000..0b8afff0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go @@ -0,0 +1,54 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import ( + "fmt" + + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters) +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. + maxNameLength = 63 + randomLength = 5 + MaxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > MaxGeneratedNameLength { + base = base[:MaxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index e4baa213..0ad28654 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -697,6 +697,9 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect +# k8s.io/apiserver v0.27.2 +## explicit; go 1.20 +k8s.io/apiserver/pkg/storage/names # k8s.io/client-go v0.27.4 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1
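
For context on the newly vendored k8s.io/apiserver/pkg/storage/names package recorded in vendor/modules.txt above: its SimpleNameGenerator appends a five-character random alphanumeric suffix to a base name while keeping the result within the 63-character limit for standard Kubernetes object names, which makes it handy for naming short-lived test resources without collisions. A minimal usage sketch under those assumptions follows; the "e2e-dig-" base prefix and the standalone main wrapper are illustrative only and not part of this patch.

    package main

    import (
    	"fmt"

    	"k8s.io/apiserver/pkg/storage/names"
    )

    func main() {
    	// GenerateName truncates the base to at most 58 characters and appends
    	// a 5-character random alphanumeric suffix, so the result never exceeds
    	// the 63-character limit for standard Kubernetes object names.
    	podName := names.SimpleNameGenerator.GenerateName("e2e-dig-")
    	fmt.Println(podName) // e.g. "e2e-dig-x7k2q"
    }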