From 363e6975783b47d38ac70d265575a238586675c7 Mon Sep 17 00:00:00 2001 From: Ciara Stacke <18287516+ciarams87@users.noreply.github.com> Date: Wed, 30 Nov 2022 15:57:14 +0000 Subject: [PATCH] feature: Support Dynamic namespaces using Labels (#3299) * Add labels flag and create namespace watcher when present * Create namespaced watchers dynamically for simple case * Handle unwatched app protect resources * Add dynamic watchers to externaldns and cert-manager controllers * Add initial python tests * Add app protect waf tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add python tests for cert-manager and external-dns * Add helm config and docs * Update logging for new and deleted watched namespaces * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- cmd/nginx-ingress/flags.go | 66 +-- cmd/nginx-ingress/main.go | 24 +- deployments/helm-chart/README.md | 5 +- .../templates/controller-daemonset.yaml | 3 + .../templates/controller-deployment.yaml | 3 + deployments/helm-chart/values.yaml | 5 +- .../command-line-arguments.md | 14 +- .../installation/installation-with-helm.md | 5 +- internal/certmanager/cm_controller.go | 89 +++- internal/configs/configurator.go | 77 +++- internal/externaldns/controller.go | 79 +++- internal/k8s/controller.go | 382 +++++++++++++++--- internal/k8s/handlers.go | 34 ++ internal/k8s/task_queue.go | 3 + tests/data/common/ns-patch.yaml | 3 + .../foreign-ns-virtual-server.yaml | 20 + .../watched-ns-virtual-server.yaml | 20 + .../watched-ns2-virtual-server.yaml | 20 + .../test_app_protect_watch_namespace_label.py | 217 ++++++++++ .../suite/test_virtual_server_certmanager.py | 57 ++- .../suite/test_virtual_server_externaldns.py | 59 ++- tests/suite/test_watch_namespace_label.py | 232 +++++++++++ tests/suite/utils/resources_utils.py | 18 + 23 files changed, 1287 insertions(+), 148 deletions(-) create mode 100644 tests/data/common/ns-patch.yaml create mode 100644 tests/data/watch-namespace/foreign-ns-virtual-server.yaml create mode 100644 tests/data/watch-namespace/watched-ns-virtual-server.yaml create mode 100644 tests/data/watch-namespace/watched-ns2-virtual-server.yaml create mode 100644 tests/suite/test_app_protect_watch_namespace_label.py create mode 100644 tests/suite/test_watch_namespace_label.py diff --git a/cmd/nginx-ingress/flags.go b/cmd/nginx-ingress/flags.go index c4bf503e76..0384f0625b 100644 --- a/cmd/nginx-ingress/flags.go +++ b/cmd/nginx-ingress/flags.go @@ -11,6 +11,7 @@ import ( "github.com/golang/glog" api_v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation" ) @@ -27,15 +28,18 @@ var ( The Ingress Controller does not start NGINX and does not write any generated NGINX configuration files to disk`) watchNamespace = flag.String("watch-namespace", api_v1.NamespaceAll, - `Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces`) + `Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with "watch-namespace-label".`) watchNamespaces []string watchSecretNamespace = flag.String("watch-secret-namespace", "", - `Comma separated list of namespaces the Ingress Controller should watch for secrets. 
If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See "watch-namespace". `) + `Comma separated list of namespaces the Ingress Controller should watch for secrets. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See "watch-namespace" and "watch-namespace-label". `) watchSecretNamespaces []string + watchNamespaceLabel = flag.String("watch-namespace-label", "", + `Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. Mutually exclusive with "watch-namespace". `) + nginxConfigMaps = flag.String("nginx-configmaps", "", `A ConfigMap resource for customizing NGINX configuration. If a ConfigMap is set, but the Ingress Controller is not able to fetch it from Kubernetes API, the Ingress Controller will fail to start. @@ -192,17 +196,7 @@ func parseFlags() { initialChecks() - watchNamespaces = strings.Split(*watchNamespace, ",") - glog.Infof("Namespaces watched: %v", watchNamespaces) - - if len(*watchSecretNamespace) > 0 { - watchSecretNamespaces = strings.Split(*watchSecretNamespace, ",") - } else { - // empty => default to watched namespaces - watchSecretNamespaces = watchNamespaces - } - - glog.Infof("Namespaces watched for secrets: %v", watchSecretNamespaces) + validateWatchedNamespaces() validationChecks() @@ -295,6 +289,42 @@ func initialChecks() { } } +func validateWatchedNamespaces() { + if *watchNamespace != "" && *watchNamespaceLabel != "" { + glog.Fatal("watch-namespace and -watch-namespace-label are mutually exclusive") + } + + watchNamespaces = strings.Split(*watchNamespace, ",") + + if *watchNamespace != "" { + glog.Infof("Namespaces watched: %v", watchNamespaces) + namespacesNameValidationError := validateNamespaceNames(watchNamespaces) + if namespacesNameValidationError != nil { + glog.Fatalf("Invalid values for namespaces: %v", namespacesNameValidationError) + } + } + + if len(*watchSecretNamespace) > 0 { + watchSecretNamespaces = strings.Split(*watchSecretNamespace, ",") + glog.Infof("Namespaces watched for secrets: %v", watchSecretNamespaces) + namespacesNameValidationError := validateNamespaceNames(watchSecretNamespaces) + if namespacesNameValidationError != nil { + glog.Fatalf("Invalid values for secret namespaces: %v", namespacesNameValidationError) + } + } else { + // empty => default to watched namespaces + watchSecretNamespaces = watchNamespaces + } + + if *watchNamespaceLabel != "" { + var err error + _, err = labels.Parse(*watchNamespaceLabel) + if err != nil { + glog.Fatalf("Unable to parse label %v for watch namespace label: %v", *watchNamespaceLabel, err) + } + } +} + // validationChecks checks the values for various flags func validationChecks() { healthStatusURIValidationError := validateLocation(*healthStatusURI) @@ -307,16 +337,6 @@ func validationChecks() { glog.Fatalf("Invalid value for leader-election-lock-name: %v", statusLockNameValidationError) } - namespacesNameValidationError := validateNamespaceNames(watchNamespaces) - if namespacesNameValidationError != nil { - glog.Fatalf("Invalid values for namespaces: %v", namespacesNameValidationError) - } - - namespacesNameValidationError = validateNamespaceNames(watchSecretNamespaces) - if namespacesNameValidationError != nil { - glog.Fatalf("Invalid values for secret namespaces: %v", namespacesNameValidationError) - } - statusPortValidationError := validatePort(*nginxStatusPort) if statusPortValidationError != nil { 
glog.Fatalf("Invalid value for nginx-status-port: %v", statusPortValidationError) diff --git a/cmd/nginx-ingress/main.go b/cmd/nginx-ingress/main.go index 68ed5345ae..3fbf2a4f4c 100644 --- a/cmd/nginx-ingress/main.go +++ b/cmd/nginx-ingress/main.go @@ -53,9 +53,7 @@ func main() { validateIngressClass(kubeClient) - checkNamespaceExists(kubeClient, watchNamespaces) - - checkNamespaceExists(kubeClient, watchSecretNamespaces) + checkNamespaces(kubeClient) dynClient, confClient := createCustomClients(config) @@ -160,6 +158,7 @@ func main() { CertManagerEnabled: *enableCertManager, ExternalDNSEnabled: *enableExternalDNS, IsIPV6Disabled: *disableIPV6, + WatchNamespaceLabel: *watchNamespaceLabel, } lbc := k8s.NewLoadBalancerController(lbcInput) @@ -243,6 +242,25 @@ func validateIngressClass(kubeClient kubernetes.Interface) { } } +func checkNamespaces(kubeClient kubernetes.Interface) { + if *watchNamespaceLabel != "" { + // bootstrap the watched namespace list + var newWatchNamespaces []string + nsList, err := kubeClient.CoreV1().Namespaces().List(context.TODO(), meta_v1.ListOptions{LabelSelector: *watchNamespaceLabel}) + if err != nil { + glog.Errorf("error when getting Namespaces with the label selector %v: %v", watchNamespaceLabel, err) + } + for _, ns := range nsList.Items { + newWatchNamespaces = append(newWatchNamespaces, ns.Name) + } + watchNamespaces = newWatchNamespaces + glog.Infof("Namespaces watched using label %v: %v", *watchNamespaceLabel, watchNamespaces) + } else { + checkNamespaceExists(kubeClient, watchNamespaces) + } + checkNamespaceExists(kubeClient, watchSecretNamespaces) +} + func checkNamespaceExists(kubeClient kubernetes.Interface, namespaces []string) { for _, ns := range namespaces { if ns != "" { diff --git a/deployments/helm-chart/README.md b/deployments/helm-chart/README.md index fce988fce2..84353f5fae 100644 --- a/deployments/helm-chart/README.md +++ b/deployments/helm-chart/README.md @@ -186,8 +186,9 @@ Parameter | Description | Default `controller.replicaCount` | The number of replicas of the Ingress Controller deployment. | 1 `controller.ingressClass` | A class of the Ingress Controller. An IngressClass resource with the name equal to the class must be deployed. Otherwise, the Ingress Controller will fail to start. The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class. The Ingress Controller processes all the VirtualServer/VirtualServerRoute/TransportServer resources that do not have the "ingressClassName" field for all versions of kubernetes. | nginx `controller.setAsDefaultIngress` | New Ingresses without an `"ingressClassName"` field specified will be assigned the class specified in `controller.ingressClass`. | false -`controller.watchNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchNamespace="default\,nginx-ingress"`. | "" -`controller.watchSecretNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources of type Secret. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See `watch-namespace`. 
Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchSecretNamespace="default\,nginx-ingress"`. | "" +`controller.watchNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchNamespace="default\,nginx-ingress"`. | "" +`controller.watchNamespaceLabel` | Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespace`. | "" +`controller.watchSecretNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources of type Secret. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See `controller.watchNamespace` and `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchSecretNamespace="default\,nginx-ingress"`. | "" `controller.enableCustomResources` | Enable the custom resources. | true `controller.enablePreviewPolicies` | Enable preview policies. This parameter is deprecated. To enable OIDC Policies please use `controller.enableOIDC` instead. | false `controller.enableOIDC` | Enable OIDC policies. 
| false diff --git a/deployments/helm-chart/templates/controller-daemonset.yaml b/deployments/helm-chart/templates/controller-daemonset.yaml index 7b311fa2d8..1c2c74cbc8 100644 --- a/deployments/helm-chart/templates/controller-daemonset.yaml +++ b/deployments/helm-chart/templates/controller-daemonset.yaml @@ -164,6 +164,9 @@ spec: {{- if .Values.controller.watchNamespace }} - -watch-namespace={{ .Values.controller.watchNamespace }} {{- end }} +{{- if .Values.controller.watchNamespaceLabel }} + - -watch-namespace-label={{ .Values.controller.watchNamespaceLabel }} +{{- end }} {{- if .Values.controller.watchSecretNamespace }} - -watch-secret-namespace={{ .Values.controller.watchSecretNamespace }} {{- end }} diff --git a/deployments/helm-chart/templates/controller-deployment.yaml b/deployments/helm-chart/templates/controller-deployment.yaml index 9ec53bf972..c4bd207515 100644 --- a/deployments/helm-chart/templates/controller-deployment.yaml +++ b/deployments/helm-chart/templates/controller-deployment.yaml @@ -167,6 +167,9 @@ spec: {{- if .Values.controller.watchNamespace }} - -watch-namespace={{ .Values.controller.watchNamespace }} {{- end }} +{{- if .Values.controller.watchNamespaceLabel }} + - -watch-namespace-label={{ .Values.controller.watchNamespaceLabel }} +{{- end }} {{- if .Values.controller.watchSecretNamespace }} - -watch-secret-namespace={{ .Values.controller.watchSecretNamespace }} {{- end }} diff --git a/deployments/helm-chart/values.yaml b/deployments/helm-chart/values.yaml index 1a0dd650f9..ec7b7edcd4 100644 --- a/deployments/helm-chart/values.yaml +++ b/deployments/helm-chart/values.yaml @@ -198,9 +198,12 @@ controller: ## New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`. setAsDefaultIngress: false - ## Comma separated list of namespaces to watch for Ingress resources. By default the Ingress Controller watches all namespaces. + ## Comma separated list of namespaces to watch for Ingress resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with "controller.watchNamespaceLabel". watchNamespace: "" + ## Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. Mutually exclusive with "controller.watchNamespace". + watchNamespaceLabel: "" + ## Comma separated list of namespaces to watch for Secret resources. By default the Ingress Controller watches all namespaces. watchSecretNamespace: "" diff --git a/docs/content/configuration/global-configuration/command-line-arguments.md b/docs/content/configuration/global-configuration/command-line-arguments.md index 5d192acc57..ed8de9f3da 100644 --- a/docs/content/configuration/global-configuration/command-line-arguments.md +++ b/docs/content/configuration/global-configuration/command-line-arguments.md @@ -302,7 +302,19 @@ A comma-separated list of pattern=N settings for file-filtered logging. ### -watch-namespace `` -Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. +Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with "watch-namespace-label". +  + + +### -watch-namespace-label `` + +Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. 
Mutually exclusive with "watch-namespace". +  + + +### -watch-secret-namespace `` + +Comma separated list of namespaces the Ingress Controller should watch for secrets. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See "watch-namespace" and "watch-namespace-label".   diff --git a/docs/content/installation/installation-with-helm.md b/docs/content/installation/installation-with-helm.md index 937e597abe..e598bcb0fd 100644 --- a/docs/content/installation/installation-with-helm.md +++ b/docs/content/installation/installation-with-helm.md @@ -185,8 +185,9 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |``controller.replicaCount`` | The number of replicas of the Ingress Controller deployment. | 1 | |``controller.ingressClass`` | A class of the Ingress Controller. An IngressClass resource with the name equal to the class must be deployed. Otherwise, the Ingress Controller will fail to start. The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class. The Ingress Controller processes all the VirtualServer/VirtualServerRoute/TransportServer resources that do not have the "ingressClassName" field for all versions of kubernetes. | nginx | |``controller.setAsDefaultIngress`` | New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`. | false | -|``controller.watchNamespace`` | Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. ``--set controller.watchNamespace="default\,nginx-ingress"``. | "" | -|``controller.watchSecretNamespace`` | Comma separated list of namespaces the Ingress Controller should watch for resources of type Secret. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See `watch-namespace`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. ``--set controller.watchSecretNamespace="default\,nginx-ingress"``. | "" | +|``controller.watchNamespace`` | Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. ``--set controller.watchNamespace="default\,nginx-ingress"``. | "" | +|``controller.watchNamespaceLabel`` | Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespace`. | "" | +|``controller.watchSecretNamespace`` | Comma separated list of namespaces the Ingress Controller should watch for resources of type Secret. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See `controller.watchNamespace` and `controller.watchNamespaceLabel`. 
Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. ``--set controller.watchSecretNamespace="default\,nginx-ingress"``. | "" | |``controller.enableCustomResources`` | Enable the custom resources. | true | |``controller.enablePreviewPolicies`` | Enable preview policies. This parameter is deprecated. To enable OIDC Policies please use ``controller.enableOIDC`` instead. | false | |``controller.enableOIDC`` | Enable OIDC policies. | false | diff --git a/internal/certmanager/cm_controller.go b/internal/certmanager/cm_controller.go index 03a6876b1c..59a75ef904 100644 --- a/internal/certmanager/cm_controller.go +++ b/internal/certmanager/cm_controller.go @@ -18,6 +18,7 @@ package certmanager import ( "context" "fmt" + "sync" "time" cmapi "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" @@ -58,7 +59,6 @@ const ( type CmController struct { sync SyncFn ctx context.Context - mustSync []cache.InformerSynced queue workqueue.RateLimitingInterface informerGroup map[string]*namespacedInformer recorder record.EventRecorder @@ -75,14 +75,18 @@ type CmOpts struct { namespace []string eventRecorder record.EventRecorder vsClient k8s_nginx.Interface + isDynamicNs bool } type namespacedInformer struct { + mustSync []cache.InformerSynced vsSharedInformerFactory vsinformers.SharedInformerFactory cmSharedInformerFactory cm_informers.SharedInformerFactory kubeSharedInformerFactory kubeinformers.SharedInformerFactory vsLister listers_v1.VirtualServerLister cmLister cmlisters.CertificateLister + stopCh chan struct{} + lock sync.RWMutex } func (c *CmController) register() workqueue.RateLimitingInterface { @@ -90,8 +94,22 @@ func (c *CmController) register() workqueue.RateLimitingInterface { return c.queue } -func (c *CmController) newNamespacedInformer(ns string) { +// BuildOpts builds a CmOpts from the given parameters +func BuildOpts(ctx context.Context, kc *rest.Config, cl kubernetes.Interface, ns []string, er record.EventRecorder, vsc k8s_nginx.Interface, idn bool) *CmOpts { + return &CmOpts{ + context: ctx, + kubeClient: cl, + kubeConfig: kc, + namespace: ns, + eventRecorder: er, + vsClient: vsc, + isDynamicNs: idn, + } +} + +func (c *CmController) newNamespacedInformer(ns string) *namespacedInformer { nsi := &namespacedInformer{} + nsi.stopCh = make(chan struct{}) nsi.cmSharedInformerFactory = cm_informers.NewSharedInformerFactoryWithOptions(c.cmClient, resyncPeriod, cm_informers.WithNamespace(ns)) nsi.kubeSharedInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(c.kubeClient, resyncPeriod, kubeinformers.WithNamespace(ns)) nsi.vsSharedInformerFactory = vsinformers.NewSharedInformerFactoryWithOptions(c.vsClient, resyncPeriod, vsinformers.WithNamespace(ns)) @@ -99,6 +117,7 @@ func (c *CmController) newNamespacedInformer(ns string) { c.addHandlers(nsi) c.informerGroup[ns] = nsi + return nsi } func (c *CmController) addHandlers(nsi *namespacedInformer) { @@ -106,13 +125,13 @@ func (c *CmController) addHandlers(nsi *namespacedInformer) { nsi.vsSharedInformerFactory.K8s().V1().VirtualServers().Informer().AddEventHandler(&controllerpkg.QueuingEventHandler{ Queue: c.queue, }) - c.mustSync = append(c.mustSync, nsi.vsSharedInformerFactory.K8s().V1().VirtualServers().Informer().HasSynced) + nsi.mustSync = append(nsi.mustSync, nsi.vsSharedInformerFactory.K8s().V1().VirtualServers().Informer().HasSynced) 
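// mustSync is now tracked per namespacedInformer, so caches for namespaces discovered at runtime can be started and waited on independently (see AddNewNamespacedInformer).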
nsi.cmSharedInformerFactory.Certmanager().V1().Certificates().Informer().AddEventHandler(&controllerpkg.BlockingEventHandler{ WorkFunc: certificateHandler(c.queue), }) nsi.cmLister = nsi.cmSharedInformerFactory.Certmanager().V1().Certificates().Lister() - c.mustSync = append(c.mustSync, nsi.cmSharedInformerFactory.Certmanager().V1().Certificates().Informer().HasSynced) + nsi.mustSync = append(nsi.mustSync, nsi.cmSharedInformerFactory.Certmanager().V1().Certificates().Informer().HasSynced) } func (c *CmController) processItem(ctx context.Context, key string) error { @@ -192,6 +211,10 @@ func NewCmController(opts *CmOpts) *CmController { } for _, ns := range opts.namespace { + if opts.isDynamicNs && ns == "" { + // no initial namespaces with watched label - skip creating informers for now + break + } cm.newNamespacedInformer(ns) } @@ -209,14 +232,15 @@ func (c *CmController) Run(stopCh <-chan struct{}) { glog.Infof("Starting cert-manager control loop") + var mustSync []cache.InformerSynced for _, ig := range c.informerGroup { - go ig.vsSharedInformerFactory.Start(c.ctx.Done()) - go ig.cmSharedInformerFactory.Start(c.ctx.Done()) - go ig.kubeSharedInformerFactory.Start(c.ctx.Done()) + ig.start() + mustSync = append(mustSync, ig.mustSync...) } - // // wait for all the informer caches we depend on are synced - glog.V(3).Infof("Waiting for %d caches to sync", len(c.mustSync)) - if !cache.WaitForNamedCacheSync(ControllerName, stopCh, c.mustSync...) { + // wait for all the informer caches we depend on are synced + + glog.V(3).Infof("Waiting for %d caches to sync", len(mustSync)) + if !cache.WaitForNamedCacheSync(ControllerName, stopCh, mustSync...) { glog.Fatal("error syncing cm queue") } @@ -226,9 +250,22 @@ func (c *CmController) Run(stopCh <-chan struct{}) { <-stopCh glog.V(3).Infof("shutting down queue as workqueue signaled shutdown") + for _, ig := range c.informerGroup { + ig.stop() + } c.queue.ShutDown() } +func (nsi *namespacedInformer) start() { + go nsi.vsSharedInformerFactory.Start(nsi.stopCh) + go nsi.cmSharedInformerFactory.Start(nsi.stopCh) + go nsi.kubeSharedInformerFactory.Start(nsi.stopCh) +} + +func (nsi *namespacedInformer) stop() { + close(nsi.stopCh) +} + // runWorker is a long-running function that will continually call the // processItem function in order to read and process a message on the // workqueue. @@ -261,14 +298,28 @@ func (c *CmController) runWorker(ctx context.Context) { } } -// BuildOpts builds a CmOpts from the given parameters -func BuildOpts(ctx context.Context, kc *rest.Config, cl kubernetes.Interface, ns []string, er record.EventRecorder, vsc k8s_nginx.Interface) *CmOpts { - return &CmOpts{ - context: ctx, - kubeClient: cl, - kubeConfig: kc, - namespace: ns, - eventRecorder: er, - vsClient: vsc, +// AddNewNamespacedInformer adds watchers for a new namespace +func (c *CmController) AddNewNamespacedInformer(ns string) { + glog.V(3).Infof("Adding or Updating cert-manager Watchers for Namespace: %v", ns) + nsi := getNamespacedInformer(ns, c.informerGroup) + if nsi == nil { + nsi = c.newNamespacedInformer(ns) + nsi.start() + } + if !cache.WaitForCacheSync(nsi.stopCh, nsi.mustSync...) 
{ + return + } +} + +// RemoveNamespacedInformer removes watchers for a namespace we are no longer watching +func (c *CmController) RemoveNamespacedInformer(ns string) { + glog.V(3).Infof("Deleting cert-manager Watchers for Deleted Namespace: %v", ns) + nsi := getNamespacedInformer(ns, c.informerGroup) + if nsi != nil { + nsi.lock.Lock() + defer nsi.lock.Unlock() + nsi.stop() + delete(c.informerGroup, ns) + nsi = nil } } diff --git a/internal/configs/configurator.go b/internal/configs/configurator.go index 237f199004..28099cf6d6 100644 --- a/internal/configs/configurator.go +++ b/internal/configs/configurator.go @@ -750,7 +750,7 @@ func GenerateCAFileContent(secret *api_v1.Secret) []byte { } // DeleteIngress deletes NGINX configuration for the Ingress resource. -func (cnf *Configurator) DeleteIngress(key string) error { +func (cnf *Configurator) DeleteIngress(key string, skipReload bool) error { name := keyToFileName(key) cnf.nginxManager.DeleteConfig(name) @@ -761,15 +761,17 @@ func (cnf *Configurator) DeleteIngress(key string) error { cnf.deleteIngressMetricsLabels(key) } - if err := cnf.reload(nginx.ReloadForOtherUpdate); err != nil { - return fmt.Errorf("error when removing ingress %v: %w", key, err) + if !skipReload { + if err := cnf.reload(nginx.ReloadForOtherUpdate); err != nil { + return fmt.Errorf("error when removing ingress %v: %w", key, err) + } } return nil } // DeleteVirtualServer deletes NGINX configuration for the VirtualServer resource. -func (cnf *Configurator) DeleteVirtualServer(key string) error { +func (cnf *Configurator) DeleteVirtualServer(key string, skipReload bool) error { name := getFileNameForVirtualServerFromKey(key) cnf.nginxManager.DeleteConfig(name) @@ -778,8 +780,10 @@ func (cnf *Configurator) DeleteVirtualServer(key string) error { cnf.deleteVirtualServerMetricsLabels(key) } - if err := cnf.reload(nginx.ReloadForOtherUpdate); err != nil { - return fmt.Errorf("error when removing VirtualServer %v: %w", key, err) + if !skipReload { + if err := cnf.reload(nginx.ReloadForOtherUpdate); err != nil { + return fmt.Errorf("error when removing VirtualServer %v: %w", key, err) + } } return nil @@ -1152,26 +1156,61 @@ func (cnf *Configurator) UpdateConfig(cfgParams *ConfigParams, resources Extende } // UpdateTransportServers updates TransportServers. 
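// The updated signature returns a slice of errors rather than stopping at the first failure, so a batch of TransportServer deletions (for example when a namespace stops being watched) is applied with a single NGINX reload.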
-func (cnf *Configurator) UpdateTransportServers(updatedTSExes []*TransportServerEx, deletedKeys []string) error { +func (cnf *Configurator) UpdateTransportServers(updatedTSExes []*TransportServerEx, deletedKeys []string) []error { + var errList []error for _, tsEx := range updatedTSExes { _, err := cnf.addOrUpdateTransportServer(tsEx) if err != nil { - return fmt.Errorf("error adding or updating TransportServer %v/%v: %w", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name, err) + errList = append(errList, fmt.Errorf("error adding or updating TransportServer %v/%v: %w", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name, err)) } } for _, key := range deletedKeys { err := cnf.deleteTransportServer(key) if err != nil { - return fmt.Errorf("error when removing TransportServer %v: %w", key, err) + errList = append(errList, fmt.Errorf("error when removing TransportServer %v: %w", key, err)) } } if err := cnf.reload(nginx.ReloadForOtherUpdate); err != nil { - return fmt.Errorf("error when updating TransportServers: %w", err) + errList = append(errList, fmt.Errorf("error when updating TransportServers: %w", err)) } - return nil + return errList +} + +// BatchDeleteVirtualServers takes a list of VirtualServer resource keys, deletes their configuration, and reloads once +func (cnf *Configurator) BatchDeleteVirtualServers(deletedKeys []string) []error { + var errList []error + for _, key := range deletedKeys { + err := cnf.DeleteVirtualServer(key, true) + if err != nil { + errList = append(errList, fmt.Errorf("error when removing VirtualServer %v: %w", key, err)) + } + } + + if err := cnf.reload(nginx.ReloadForOtherUpdate); err != nil { + errList = append(errList, fmt.Errorf("error when reloading NGINX for deleted VirtualServers: %w", err)) + } + + return errList +} + +// BatchDeleteIngresses takes a list of Ingress resource keys, deletes their configuration, and reloads once +func (cnf *Configurator) BatchDeleteIngresses(deletedKeys []string) []error { + var errList []error + for _, key := range deletedKeys { + err := cnf.DeleteIngress(key, true) + if err != nil { + errList = append(errList, fmt.Errorf("error when removing Ingress %v: %w", key, err)) + } + } + + if err := cnf.reload(nginx.ReloadForOtherUpdate); err != nil { + errList = append(errList, fmt.Errorf("error when reloading NGINX for deleted Ingresses: %w", err)) + } + + return errList } func keyToFileName(key string) string { @@ -1437,20 +1476,24 @@ func (cnf *Configurator) addOrUpdateIngressesAndVirtualServers(ingExes []*Ingres // DeleteAppProtectPolicy updates Ingresses and VirtualServers that use AP Policy after that policy is deleted func (cnf *Configurator) DeleteAppProtectPolicy(resource *unstructured.Unstructured, ingExes []*IngressEx, mergeableIngresses []*MergeableIngresses, vsExes []*VirtualServerEx) (Warnings, error) { + warnings := newWarnings() + var err error if len(ingExes)+len(mergeableIngresses)+len(vsExes) > 0 { - cnf.nginxManager.DeleteAppProtectResourceFile(appProtectPolicyFileNameFromUnstruct(resource)) + warnings, err = cnf.AddOrUpdateAppProtectResource(resource, ingExes, mergeableIngresses, vsExes) } - - return cnf.AddOrUpdateAppProtectResource(resource, ingExes, mergeableIngresses, vsExes) + cnf.nginxManager.DeleteAppProtectResourceFile(appProtectPolicyFileNameFromUnstruct(resource)) + return warnings, err } // DeleteAppProtectLogConf updates Ingresses and VirtualServers that use AP Log Configuration after that policy is deleted func (cnf *Configurator) DeleteAppProtectLogConf(resource 
*unstructured.Unstructured, ingExes []*IngressEx, mergeableIngresses []*MergeableIngresses, vsExes []*VirtualServerEx) (Warnings, error) { + warnings := newWarnings() + var err error if len(ingExes)+len(mergeableIngresses)+len(vsExes) > 0 { - cnf.nginxManager.DeleteAppProtectResourceFile(appProtectLogConfFileNameFromUnstruct(resource)) + warnings, err = cnf.AddOrUpdateAppProtectResource(resource, ingExes, mergeableIngresses, vsExes) } - - return cnf.AddOrUpdateAppProtectResource(resource, ingExes, mergeableIngresses, vsExes) + cnf.nginxManager.DeleteAppProtectResourceFile(appProtectLogConfFileNameFromUnstruct(resource)) + return warnings, err } // RefreshAppProtectUserSigs writes all valid UDS files to fs and reloads NGINX diff --git a/internal/externaldns/controller.go b/internal/externaldns/controller.go index 12e7ba466f..57d258bf96 100644 --- a/internal/externaldns/controller.go +++ b/internal/externaldns/controller.go @@ -3,6 +3,7 @@ package externaldns import ( "context" "fmt" + "sync" "time" "github.com/golang/glog" @@ -30,7 +31,6 @@ const ( type ExtDNSController struct { sync SyncFn ctx context.Context - mustSync []cache.InformerSynced queue workqueue.RateLimitingInterface recorder record.EventRecorder client k8s_nginx.Interface @@ -42,6 +42,9 @@ type namespacedInformer struct { vsLister listersV1.VirtualServerLister sharedInformerFactory k8s_nginx_informers.SharedInformerFactory extdnslister extdnslisters.DNSEndpointLister + mustSync []cache.InformerSynced + stopCh chan struct{} + lock sync.RWMutex } // ExtDNSOpts represents config required for building the External DNS Controller. @@ -51,6 +54,7 @@ type ExtDNSOpts struct { eventRecorder record.EventRecorder client k8s_nginx.Interface resyncPeriod time.Duration + isDynamicNs bool } // NewController takes external dns config and return a new External DNS Controller. 
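// When isDynamicNs is set and only the empty placeholder namespace is present, informer creation is deferred; AddNewNamespacedInformer (below) builds and starts the per-namespace informers once a namespace matching the watch label appears.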
@@ -66,6 +70,10 @@ func NewController(opts *ExtDNSOpts) *ExtDNSController { } for _, ns := range opts.namespace { + if opts.isDynamicNs && ns == "" { + // no initial namespaces with watched label - skip creating informers for now + break + } c.newNamespacedInformer(ns) } @@ -73,8 +81,8 @@ func NewController(opts *ExtDNSOpts) *ExtDNSController { return c } -func (c *ExtDNSController) newNamespacedInformer(ns string) { - nsi := namespacedInformer{sharedInformerFactory: k8s_nginx_informers.NewSharedInformerFactoryWithOptions(c.client, c.resync, k8s_nginx_informers.WithNamespace(ns))} +func (c *ExtDNSController) newNamespacedInformer(ns string) *namespacedInformer { + nsi := &namespacedInformer{sharedInformerFactory: k8s_nginx_informers.NewSharedInformerFactoryWithOptions(c.client, c.resync, k8s_nginx_informers.WithNamespace(ns))} nsi.vsLister = nsi.sharedInformerFactory.K8s().V1().VirtualServers().Lister() nsi.extdnslister = nsi.sharedInformerFactory.Externaldns().V1().DNSEndpoints().Lister() @@ -88,11 +96,12 @@ func (c *ExtDNSController) newNamespacedInformer(ns string) { WorkFunc: externalDNSHandler(c.queue), }) - c.mustSync = append(c.mustSync, + nsi.mustSync = append(nsi.mustSync, nsi.sharedInformerFactory.K8s().V1().VirtualServers().Informer().HasSynced, nsi.sharedInformerFactory.Externaldns().V1().DNSEndpoints().Informer().HasSynced, ) - c.informerGroup[ns] = &nsi + c.informerGroup[ns] = nsi + return nsi } // Run sets up the event handlers for types we are interested in, as well @@ -105,13 +114,15 @@ func (c *ExtDNSController) Run(stopCh <-chan struct{}) { glog.Infof("Starting external-dns control loop") + var mustSync []cache.InformerSynced for _, ig := range c.informerGroup { - go ig.sharedInformerFactory.Start(c.ctx.Done()) + ig.start() + mustSync = append(mustSync, ig.mustSync...) } // wait for all informer caches to be synced - glog.V(3).Infof("Waiting for %d caches to sync", len(c.mustSync)) - if !cache.WaitForNamedCacheSync(ControllerName, stopCh, c.mustSync...) { + glog.V(3).Infof("Waiting for %d caches to sync", len(mustSync)) + if !cache.WaitForNamedCacheSync(ControllerName, stopCh, mustSync...) { glog.Fatal("error syncing extDNS queue") } @@ -121,9 +132,20 @@ func (c *ExtDNSController) Run(stopCh <-chan struct{}) { <-stopCh glog.V(3).Infof("shutting down queue as workqueue signaled shutdown") + for _, ig := range c.informerGroup { + ig.stop() + } c.queue.ShutDown() } +func (nsi *namespacedInformer) start() { + go nsi.sharedInformerFactory.Start(nsi.stopCh) +} + +func (nsi *namespacedInformer) stop() { + close(nsi.stopCh) +} + // runWorker is a long-running function that will continually call the processItem // function in order to read and process a message on the workqueue. 
func (c *ExtDNSController) runWorker(ctx context.Context) { @@ -195,19 +217,14 @@ func externalDNSHandler(queue workqueue.RateLimitingInterface) func(obj interfac } // BuildOpts builds the externalDNS controller options -func BuildOpts( - ctx context.Context, - namespace []string, - recorder record.EventRecorder, - k8sNginxClient k8s_nginx.Interface, - resync time.Duration, -) *ExtDNSOpts { +func BuildOpts(ctx context.Context, ns []string, rdr record.EventRecorder, client k8s_nginx.Interface, resync time.Duration, idn bool) *ExtDNSOpts { return &ExtDNSOpts{ context: ctx, - namespace: namespace, - eventRecorder: recorder, - client: k8sNginxClient, + namespace: ns, + eventRecorder: rdr, + client: client, resyncPeriod: resync, + isDynamicNs: idn, } } @@ -228,3 +245,29 @@ func getNamespacedInformer(ns string, ig map[string]*namespacedInformer) *namesp } return nsi } + +// AddNewNamespacedInformer adds watchers for a new namespace +func (c *ExtDNSController) AddNewNamespacedInformer(ns string) { + glog.V(3).Infof("Adding or Updating cert-manager Watchers for Namespace: %v", ns) + nsi := getNamespacedInformer(ns, c.informerGroup) + if nsi == nil { + nsi = c.newNamespacedInformer(ns) + nsi.start() + } + if !cache.WaitForCacheSync(nsi.stopCh, nsi.mustSync...) { + return + } +} + +// RemoveNamespacedInformer removes watchers for a namespace we are no longer watching +func (c *ExtDNSController) RemoveNamespacedInformer(ns string) { + glog.V(3).Infof("Deleting cert-manager Watchers for Deleted Namespace: %v", ns) + nsi := getNamespacedInformer(ns, c.informerGroup) + if nsi != nil { + nsi.lock.Lock() + defer nsi.lock.Unlock() + nsi.stop() + delete(c.informerGroup, ns) + nsi = nil + } +} diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 6c242739dc..382365ae6d 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -113,6 +113,7 @@ type LoadBalancerController struct { configMapLister storeToConfigMapLister globalConfigurationLister cache.Store ingressLinkLister cache.Store + namespaceLabeledLister cache.Store syncQueue *taskQueue ctx context.Context cancel context.CancelFunc @@ -156,6 +157,7 @@ type LoadBalancerController struct { externalDNSController *ed_controller.ExtDNSController batchSyncEnabled bool isIPV6Disabled bool + namespaceWatcherController cache.Controller } var keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc @@ -199,6 +201,7 @@ type NewLoadBalancerControllerInput struct { CertManagerEnabled bool ExternalDNSEnabled bool IsIPV6Disabled bool + WatchNamespaceLabel string } // NewLoadBalancerController creates a controller @@ -250,18 +253,28 @@ func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalanc } } + isDynamicNs := input.WatchNamespaceLabel != "" + + if isDynamicNs { + lbc.addNamespaceHandler(createNamespaceHandlers(lbc), input.WatchNamespaceLabel) + } + if input.CertManagerEnabled { - lbc.certManagerController = cm_controller.NewCmController(cm_controller.BuildOpts(context.TODO(), lbc.restConfig, lbc.client, lbc.namespaceList, lbc.recorder, lbc.confClient)) + lbc.certManagerController = cm_controller.NewCmController(cm_controller.BuildOpts(context.TODO(), lbc.restConfig, lbc.client, lbc.namespaceList, lbc.recorder, lbc.confClient, isDynamicNs)) } if input.ExternalDNSEnabled { - lbc.externalDNSController = ed_controller.NewController(ed_controller.BuildOpts(context.TODO(), lbc.namespaceList, lbc.recorder, lbc.confClient, input.ResyncPeriod)) + lbc.externalDNSController = 
ed_controller.NewController(ed_controller.BuildOpts(context.TODO(), lbc.namespaceList, lbc.recorder, lbc.confClient, input.ResyncPeriod, isDynamicNs)) } glog.V(3).Infof("Nginx Ingress Controller has class: %v", input.IngressClass) lbc.namespacedInformers = make(map[string]*namespacedInformer) for _, ns := range lbc.namespaceList { + if isDynamicNs && ns == "" { + // no initial namespaces with watched label - skip creating informers for now + break + } lbc.newNamespacedInformer(ns) } @@ -326,7 +339,7 @@ func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalanc } type namespacedInformer struct { - // namespace string + namespace string sharedInformerFactory informers.SharedInformerFactory confSharedInformerFactory k8s_nginx_informers.SharedInformerFactory secretInformerFactory informers.SharedInformerFactory @@ -350,18 +363,22 @@ type namespacedInformer struct { areCustomResourcesEnabled bool appProtectEnabled bool appProtectDosEnabled bool - stopCh <-chan struct{} + stopCh chan struct{} + lock sync.RWMutex + cacheSyncs []cache.InformerSynced } -func (lbc *LoadBalancerController) newNamespacedInformer(ns string) { +func (lbc *LoadBalancerController) newNamespacedInformer(ns string) *namespacedInformer { nsi := &namespacedInformer{} + nsi.stopCh = make(chan struct{}) + nsi.namespace = ns nsi.sharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(lbc.client, lbc.resync, informers.WithNamespace(ns)) // create handlers for resources we care about - lbc.addIngressHandler(createIngressHandlers(lbc), nsi) - lbc.addServiceHandler(createServiceHandlers(lbc), nsi) - lbc.addEndpointSliceHandler(createEndpointSliceHandlers(lbc), nsi) - lbc.addPodHandler(nsi) + nsi.addIngressHandler(createIngressHandlers(lbc)) + nsi.addServiceHandler(createServiceHandlers(lbc)) + nsi.addEndpointSliceHandler(createEndpointSliceHandlers(lbc)) + nsi.addPodHandler() secretsTweakListOptionsFunc := func(options *meta_v1.ListOptions) { // Filter for helm release secrets. 
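// The check below also treats an empty entry in lbc.secretNamespaceList as a match, so namespaces discovered dynamically via the watch label get a secrets informer by default.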
@@ -377,10 +394,10 @@ func (lbc *LoadBalancerController) newNamespacedInformer(ns string) { // Check if secrets informer should be created for this namespace for _, v := range lbc.secretNamespaceList { - if v == ns { + if v == "" || v == ns { nsi.isSecretsEnabledNamespace = true nsi.secretInformerFactory = informers.NewSharedInformerFactoryWithOptions(lbc.client, lbc.resync, informers.WithNamespace(ns), informers.WithTweakListOptions(secretsTweakListOptionsFunc)) - lbc.addSecretHandler(createSecretHandlers(lbc), nsi) + nsi.addSecretHandler(createSecretHandlers(lbc)) break } } @@ -389,33 +406,32 @@ func (lbc *LoadBalancerController) newNamespacedInformer(ns string) { nsi.areCustomResourcesEnabled = true nsi.confSharedInformerFactory = k8s_nginx_informers.NewSharedInformerFactoryWithOptions(lbc.confClient, lbc.resync, k8s_nginx_informers.WithNamespace(ns)) - lbc.addVirtualServerHandler(createVirtualServerHandlers(lbc), nsi) - lbc.addVirtualServerRouteHandler(createVirtualServerRouteHandlers(lbc), nsi) - lbc.addTransportServerHandler(createTransportServerHandlers(lbc), nsi) - lbc.addPolicyHandler(createPolicyHandlers(lbc), nsi) + nsi.addVirtualServerHandler(createVirtualServerHandlers(lbc)) + nsi.addVirtualServerRouteHandler(createVirtualServerRouteHandlers(lbc)) + nsi.addTransportServerHandler(createTransportServerHandlers(lbc)) + nsi.addPolicyHandler(createPolicyHandlers(lbc)) } if lbc.appProtectEnabled || lbc.appProtectDosEnabled { - for _, ns := range lbc.namespaceList { - nsi.dynInformerFactory = dynamicinformer.NewFilteredDynamicSharedInformerFactory(lbc.dynClient, 0, ns, nil) - } + nsi.dynInformerFactory = dynamicinformer.NewFilteredDynamicSharedInformerFactory(lbc.dynClient, 0, ns, nil) if lbc.appProtectEnabled { nsi.appProtectEnabled = true - lbc.addAppProtectPolicyHandler(createAppProtectPolicyHandlers(lbc), nsi) - lbc.addAppProtectLogConfHandler(createAppProtectLogConfHandlers(lbc), nsi) - lbc.addAppProtectUserSigHandler(createAppProtectUserSigHandlers(lbc), nsi) + nsi.addAppProtectPolicyHandler(createAppProtectPolicyHandlers(lbc)) + nsi.addAppProtectLogConfHandler(createAppProtectLogConfHandlers(lbc)) + nsi.addAppProtectUserSigHandler(createAppProtectUserSigHandlers(lbc)) } if lbc.appProtectDosEnabled { nsi.appProtectDosEnabled = true - lbc.addAppProtectDosPolicyHandler(createAppProtectDosPolicyHandlers(lbc), nsi) - lbc.addAppProtectDosLogConfHandler(createAppProtectDosLogConfHandlers(lbc), nsi) - lbc.addAppProtectDosProtectedResourceHandler(createAppProtectDosProtectedResourceHandlers(lbc), nsi) + nsi.addAppProtectDosPolicyHandler(createAppProtectDosPolicyHandlers(lbc)) + nsi.addAppProtectDosLogConfHandler(createAppProtectDosLogConfHandlers(lbc)) + nsi.addAppProtectDosProtectedResourceHandler(createAppProtectDosProtectedResourceHandlers(lbc)) } } lbc.namespacedInformers[ns] = nsi + return nsi } // addLeaderHandler adds the handler for leader election to the controller @@ -433,95 +449,95 @@ func (lbc *LoadBalancerController) AddSyncQueue(item interface{}) { } // addAppProtectPolicyHandler creates dynamic informers for custom appprotect policy resource -func (lbc *LoadBalancerController) addAppProtectPolicyHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addAppProtectPolicyHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.dynInformerFactory.ForResource(appprotect.PolicyGVR).Informer() informer.AddEventHandler(handlers) nsi.appProtectPolicyLister = informer.GetStore() - lbc.cacheSyncs = 
append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addAppProtectLogConfHandler creates dynamic informer for custom appprotect logging config resource -func (lbc *LoadBalancerController) addAppProtectLogConfHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addAppProtectLogConfHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.dynInformerFactory.ForResource(appprotect.LogConfGVR).Informer() informer.AddEventHandler(handlers) nsi.appProtectLogConfLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addAppProtectUserSigHandler creates dynamic informer for custom appprotect user defined signature resource -func (lbc *LoadBalancerController) addAppProtectUserSigHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addAppProtectUserSigHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.dynInformerFactory.ForResource(appprotect.UserSigGVR).Informer() informer.AddEventHandler(handlers) nsi.appProtectUserSigLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addAppProtectDosPolicyHandler creates dynamic informers for custom appprotectdos policy resource -func (lbc *LoadBalancerController) addAppProtectDosPolicyHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addAppProtectDosPolicyHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.dynInformerFactory.ForResource(appprotectdos.DosPolicyGVR).Informer() informer.AddEventHandler(handlers) nsi.appProtectDosPolicyLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addAppProtectDosLogConfHandler creates dynamic informer for custom appprotectdos logging config resource -func (lbc *LoadBalancerController) addAppProtectDosLogConfHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addAppProtectDosLogConfHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.dynInformerFactory.ForResource(appprotectdos.DosLogConfGVR).Informer() informer.AddEventHandler(handlers) nsi.appProtectDosLogConfLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addAppProtectDosLogConfHandler creates dynamic informers for custom appprotectdos logging config resource -func (lbc *LoadBalancerController) addAppProtectDosProtectedResourceHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addAppProtectDosProtectedResourceHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.confSharedInformerFactory.Appprotectdos().V1beta1().DosProtectedResources().Informer() informer.AddEventHandler(handlers) nsi.appProtectDosProtectedLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addSecretHandler adds the handler for secrets to the controller -func (lbc *LoadBalancerController) addSecretHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func 
(nsi *namespacedInformer) addSecretHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.secretInformerFactory.Core().V1().Secrets().Informer() informer.AddEventHandler(handlers) nsi.secretLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addServiceHandler adds the handler for services to the controller -func (lbc *LoadBalancerController) addServiceHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addServiceHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.sharedInformerFactory.Core().V1().Services().Informer() informer.AddEventHandler(handlers) nsi.svcLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addIngressHandler adds the handler for ingresses to the controller -func (lbc *LoadBalancerController) addIngressHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addIngressHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.sharedInformerFactory.Networking().V1().Ingresses().Informer() informer.AddEventHandler(handlers) nsi.ingressLister = storeToIngressLister{Store: informer.GetStore()} - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addEndpointSliceHandler adds the handler for EndpointSlices to the controller -func (lbc *LoadBalancerController) addEndpointSliceHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addEndpointSliceHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.sharedInformerFactory.Discovery().V1().EndpointSlices().Informer() informer.AddEventHandler(handlers) var el storeToEndpointSliceLister el.Store = informer.GetStore() nsi.endpointSliceLister = el - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } // addConfigMapHandler adds the handler for config maps to the controller @@ -539,35 +555,35 @@ func (lbc *LoadBalancerController) addConfigMapHandler(handlers cache.ResourceEv lbc.cacheSyncs = append(lbc.cacheSyncs, lbc.configMapController.HasSynced) } -func (lbc *LoadBalancerController) addPodHandler(nsi *namespacedInformer) { +func (nsi *namespacedInformer) addPodHandler() { informer := nsi.sharedInformerFactory.Core().V1().Pods().Informer() nsi.podLister = indexerToPodLister{Indexer: informer.GetIndexer()} - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } -func (lbc *LoadBalancerController) addVirtualServerHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addVirtualServerHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.confSharedInformerFactory.K8s().V1().VirtualServers().Informer() informer.AddEventHandler(handlers) nsi.virtualServerLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } -func (lbc *LoadBalancerController) addVirtualServerRouteHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addVirtualServerRouteHandler(handlers cache.ResourceEventHandlerFuncs) { 
informer := nsi.confSharedInformerFactory.K8s().V1().VirtualServerRoutes().Informer() informer.AddEventHandler(handlers) nsi.virtualServerRouteLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } -func (lbc *LoadBalancerController) addPolicyHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addPolicyHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.confSharedInformerFactory.K8s().V1().Policies().Informer() informer.AddEventHandler(handlers) nsi.policyLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } func (lbc *LoadBalancerController) addGlobalConfigurationHandler(handlers cache.ResourceEventHandlerFuncs, namespace string, name string) { @@ -584,12 +600,12 @@ func (lbc *LoadBalancerController) addGlobalConfigurationHandler(handlers cache. lbc.cacheSyncs = append(lbc.cacheSyncs, lbc.globalConfigurationController.HasSynced) } -func (lbc *LoadBalancerController) addTransportServerHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +func (nsi *namespacedInformer) addTransportServerHandler(handlers cache.ResourceEventHandlerFuncs) { informer := nsi.confSharedInformerFactory.K8s().V1alpha1().TransportServers().Informer() informer.AddEventHandler(handlers) nsi.transportServerLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + nsi.cacheSyncs = append(nsi.cacheSyncs, informer.HasSynced) } func (lbc *LoadBalancerController) addIngressLinkHandler(handlers cache.ResourceEventHandlerFuncs, name string) { @@ -608,10 +624,26 @@ func (lbc *LoadBalancerController) addIngressLinkHandler(handlers cache.Resource lbc.cacheSyncs = append(lbc.cacheSyncs, lbc.ingressLinkInformer.HasSynced) } +func (lbc *LoadBalancerController) addNamespaceHandler(handlers cache.ResourceEventHandlerFuncs, nsLabel string) { + optionsModifier := func(options *meta_v1.ListOptions) { + options.LabelSelector = nsLabel + } + nsInformer := informers.NewSharedInformerFactoryWithOptions(lbc.client, lbc.resync, informers.WithTweakListOptions(optionsModifier)).Core().V1().Namespaces().Informer() + nsInformer.AddEventHandler(handlers) + lbc.namespaceLabeledLister = nsInformer.GetStore() + lbc.namespaceWatcherController = nsInformer + + lbc.cacheSyncs = append(lbc.cacheSyncs, nsInformer.HasSynced) +} + // Run starts the loadbalancer controller func (lbc *LoadBalancerController) Run() { lbc.ctx, lbc.cancel = context.WithCancel(context.Background()) + if lbc.namespaceWatcherController != nil { + go lbc.namespaceWatcherController.Run(lbc.ctx.Done()) + } + if lbc.spiffeCertFetcher != nil { err := lbc.spiffeCertFetcher.Start(lbc.ctx, lbc.addInternalRouteServer) if err != nil { @@ -629,7 +661,6 @@ func (lbc *LoadBalancerController) Run() { } for _, nif := range lbc.namespacedInformers { - nif.stopCh = lbc.ctx.Done() nif.start() } @@ -644,9 +675,15 @@ func (lbc *LoadBalancerController) Run() { go lbc.ingressLinkInformer.Run(lbc.ctx.Done()) } - glog.V(3).Infof("Waiting for %d caches to sync", len(lbc.cacheSyncs)) + totalCacheSyncs := lbc.cacheSyncs + + for _, nif := range lbc.namespacedInformers { + totalCacheSyncs = append(totalCacheSyncs, nif.cacheSyncs...) + } - if !cache.WaitForCacheSync(lbc.ctx.Done(), lbc.cacheSyncs...) 
{ + glog.V(3).Infof("Waiting for %d caches to sync", len(totalCacheSyncs)) + + if !cache.WaitForCacheSync(lbc.ctx.Done(), totalCacheSyncs...) { return } @@ -661,6 +698,9 @@ func (lbc *LoadBalancerController) Run() { // Stop shutsdown the load balancer controller func (lbc *LoadBalancerController) Stop() { lbc.cancel() + for _, nif := range lbc.namespacedInformers { + nif.stop() + } lbc.syncQueue.Shutdown() } @@ -680,6 +720,10 @@ func (nsi *namespacedInformer) start() { } } +func (nsi *namespacedInformer) stop() { + close(nsi.stopCh) +} + func (lbc *LoadBalancerController) getNamespacedInformer(ns string) *namespacedInformer { var nsi *namespacedInformer var isGlobalNs bool @@ -912,6 +956,8 @@ func (lbc *LoadBalancerController) sync(task task) { lbc.syncSecret(task) case service: lbc.syncService(task) + case namespace: + lbc.syncNamespace(task) case virtualserver: lbc.syncVirtualServer(task) lbc.updateVirtualServerMetrics() @@ -960,6 +1006,217 @@ func (lbc *LoadBalancerController) sync(task task) { } } +func (lbc *LoadBalancerController) syncNamespace(task task) { + key := task.Key + // process namespace and add to / remove from watched namespace list + _, exists, err := lbc.namespaceLabeledLister.GetByKey(key) + if err != nil { + lbc.syncQueue.Requeue(task, err) + return + } + + if !exists { + // Check if change is because of a new label, or because of a deleted namespace + ns, _ := lbc.client.CoreV1().Namespaces().Get(context.TODO(), key, meta_v1.GetOptions{}) + + if ns != nil && ns.Status.Phase == api_v1.NamespaceActive { + // namespace still exists + glog.Infof("Removing Configuration for Unwatched Namespace: %v", key) + // Watched label for namespace was removed + // delete any now unwatched namespaced informer groups if required + nsi := lbc.getNamespacedInformer(key) + if nsi != nil { + lbc.cleanupUnwatchedNamespacedResources(nsi) + delete(lbc.namespacedInformers, key) + } + } else { + glog.Infof("Deleting Watchers for Deleted Namespace: %v", key) + nsi := lbc.getNamespacedInformer(key) + if nsi != nil { + lbc.removeNamespacedInformer(nsi, key) + } + } + if lbc.certManagerController != nil { + lbc.certManagerController.RemoveNamespacedInformer(key) + } + if lbc.externalDNSController != nil { + lbc.externalDNSController.RemoveNamespacedInformer(key) + } + } else { + // check if informer group already exists + // if not create new namespaced informer group + // update cert-manager informer group if required + // update external-dns informer group if required + glog.V(3).Infof("Adding or Updating Watched Namespace: %v", key) + nsi := lbc.getNamespacedInformer(key) + if nsi == nil { + glog.Infof("Adding New Watched Namespace: %v", key) + nsi = lbc.newNamespacedInformer(key) + nsi.start() + } + if lbc.certManagerController != nil { + lbc.certManagerController.AddNewNamespacedInformer(key) + } + if lbc.externalDNSController != nil { + lbc.externalDNSController.AddNewNamespacedInformer(key) + } + if !cache.WaitForCacheSync(nsi.stopCh, nsi.cacheSyncs...) 
{
+            return
+        }
+    }
+}
+
+func (lbc *LoadBalancerController) removeNamespacedInformer(nsi *namespacedInformer, key string) {
+    nsi.lock.Lock()
+    defer nsi.lock.Unlock()
+    nsi.stop()
+    delete(lbc.namespacedInformers, key)
+    nsi = nil
+}
+
+func (lbc *LoadBalancerController) cleanupUnwatchedNamespacedResources(nsi *namespacedInformer) {
+    // if a namespace is not deleted but the label is removed: we see an update event, so we will stop watching that namespace,
+    // BUT we need to remove any configuration for resources deployed in that namespace and still maintained by us
+    nsi.lock.Lock()
+    defer nsi.lock.Unlock()
+
+    var delIngressList []string
+
+    il, err := nsi.ingressLister.List()
+    if err != nil {
+        glog.Warningf("unable to list Ingress resources for recently unwatched namespace %s", nsi.namespace)
+    } else {
+        for _, ing := range il.Items {
+            key := getResourceKey(&ing.ObjectMeta)
+            delIngressList = append(delIngressList, key)
+            lbc.configuration.DeleteIngress(key)
+        }
+        delIngErrs := lbc.configurator.BatchDeleteIngresses(delIngressList)
+        if len(delIngErrs) > 0 {
+            glog.Warningf("Received error(s) deleting Ingress configurations from unwatched namespace: %v", delIngErrs)
+        }
+    }
+
+    if nsi.areCustomResourcesEnabled {
+        var delVsList []string
+        for _, obj := range nsi.virtualServerLister.List() {
+            vs := obj.(*conf_v1.VirtualServer)
+            key := getResourceKey(&vs.ObjectMeta)
+            delVsList = append(delVsList, key)
+            lbc.configuration.DeleteVirtualServer(key)
+        }
+        delVsErrs := lbc.configurator.BatchDeleteVirtualServers(delVsList)
+        if len(delVsErrs) > 0 {
+            glog.Warningf("Received error(s) deleting VirtualServer configurations from unwatched namespace: %v", delVsErrs)
+        }
+
+        var delTsList []string
+        for _, obj := range nsi.transportServerLister.List() {
+            ts := obj.(*conf_v1alpha1.TransportServer)
+            key := getResourceKey(&ts.ObjectMeta)
+            delTsList = append(delTsList, key)
+            lbc.configuration.DeleteTransportServer(key)
+        }
+        var updatedTSExes []*configs.TransportServerEx
+        delTsErrs := lbc.configurator.UpdateTransportServers(updatedTSExes, delTsList)
+        if len(delTsErrs) > 0 {
+            glog.Warningf("Received error(s) deleting TransportServer configurations from unwatched namespace: %v", delTsErrs)
+        }
+
+        for _, obj := range nsi.virtualServerRouteLister.List() {
+            vsr := obj.(*conf_v1.VirtualServerRoute)
+            key := getResourceKey(&vsr.ObjectMeta)
+            lbc.configuration.DeleteVirtualServerRoute(key)
+        }
+    }
+    if nsi.appProtectEnabled {
+        lbc.cleanupUnwatchedAppWafResources(nsi)
+    }
+    if nsi.appProtectDosEnabled {
+        lbc.cleanupUnwatchedAppDosResources(nsi)
+    }
+    for _, obj := range nsi.secretLister.List() {
+        sec := obj.(*api_v1.Secret)
+        key := getResourceKey(&sec.ObjectMeta)
+        resources := lbc.configuration.FindResourcesForSecret(sec.Namespace, sec.Name)
+        lbc.secretStore.DeleteSecret(key)
+
+        glog.V(2).Infof("Deleting Secret: %v\n", key)
+
+        if len(resources) > 0 {
+            lbc.handleRegularSecretDeletion(resources)
+        }
+        if lbc.isSpecialSecret(key) {
+            glog.Warningf("A special TLS Secret %v was removed. Retaining the Secret.", key)
+        }
+    }
+    glog.V(3).Infof("Finished cleaning up configuration for unwatched resources in namespace: %v", nsi.namespace)
+    nsi.stop()
+}
+
+func (lbc *LoadBalancerController) cleanupUnwatchedAppWafResources(nsi *namespacedInformer) {
+    for _, obj := range nsi.appProtectPolicyLister.List() {
+        glog.V(3).Infof("Cleaning up unwatched appprotect policies in namespace: %v", nsi.namespace)
+        appPol := obj.(*unstructured.Unstructured)
+        namespace := appPol.GetNamespace()
+        name := appPol.GetName()
+
+        changes, problems := lbc.appProtectConfiguration.DeletePolicy(namespace + "/" + name)
+        lbc.processAppProtectChanges(changes)
+        lbc.processAppProtectProblems(problems)
+    }
+    for _, obj := range nsi.appProtectLogConfLister.List() {
+        glog.V(3).Infof("Cleaning up unwatched appprotect logconfs in namespace: %v", nsi.namespace)
+        appLogConf := obj.(*unstructured.Unstructured)
+        namespace := appLogConf.GetNamespace()
+        name := appLogConf.GetName()
+
+        changes, problems := lbc.appProtectConfiguration.DeleteLogConf(namespace + "/" + name)
+        lbc.processAppProtectChanges(changes)
+        lbc.processAppProtectProblems(problems)
+    }
+    for _, obj := range nsi.appProtectUserSigLister.List() {
+        glog.V(3).Infof("Cleaning up unwatched usersigs in namespace: %v", nsi.namespace)
+        appUserSig := obj.(*unstructured.Unstructured)
+        namespace := appUserSig.GetNamespace()
+        name := appUserSig.GetName()
+
+        changes, problems := lbc.appProtectConfiguration.DeleteUserSig(namespace + "/" + name)
+        lbc.processAppProtectUserSigChange(changes)
+        lbc.processAppProtectProblems(problems)
+    }
+}
+
+func (lbc *LoadBalancerController) cleanupUnwatchedAppDosResources(nsi *namespacedInformer) {
+    for _, obj := range nsi.appProtectDosPolicyLister.List() {
+        dosPol := obj.(*unstructured.Unstructured)
+        namespace := dosPol.GetNamespace()
+        name := dosPol.GetName()
+
+        changes, problems := lbc.dosConfiguration.DeletePolicy(namespace + "/" + name)
+        lbc.processAppProtectDosChanges(changes)
+        lbc.processAppProtectDosProblems(problems)
+    }
+    for _, obj := range nsi.appProtectDosProtectedLister.List() {
+        dosPol := obj.(*unstructured.Unstructured)
+        namespace := dosPol.GetNamespace()
+        name := dosPol.GetName()
+
+        changes, problems := lbc.dosConfiguration.DeleteProtectedResource(namespace + "/" + name)
+        lbc.processAppProtectDosChanges(changes)
+        lbc.processAppProtectDosProblems(problems)
+    }
+    for _, obj := range nsi.appProtectDosLogConfLister.List() {
+        dosPol := obj.(*unstructured.Unstructured)
+        namespace := dosPol.GetNamespace()
+        name := dosPol.GetName()
+
+        changes, problems := lbc.dosConfiguration.DeleteLogConf(namespace + "/" + name)
+        lbc.processAppProtectDosChanges(changes)
+        lbc.processAppProtectDosProblems(problems)
+    }
+}
+
 func (lbc *LoadBalancerController) syncIngressLink(task task) {
     key := task.Key
     glog.V(2).Infof("Adding, Updating or Deleting IngressLink: %v", key)
@@ -1261,7 +1518,7 @@ func (lbc *LoadBalancerController) processChanges(changes []ResourceChange) {
         case *VirtualServerConfiguration:
             key := getResourceKey(&impl.VirtualServer.ObjectMeta)

-            deleteErr := lbc.configurator.DeleteVirtualServer(key)
+            deleteErr := lbc.configurator.DeleteVirtualServer(key, false)
             if deleteErr != nil {
                 glog.Errorf("Error when deleting configuration for VirtualServer %v: %v", key, deleteErr)
             }
@@ -1284,7 +1541,7 @@ func (lbc *LoadBalancerController) processChanges(changes []ResourceChange) {

             glog.V(2).Infof("Deleting Ingress: %v\n", key)

-            deleteErr := lbc.configurator.DeleteIngress(key)
+            deleteErr :=
lbc.configurator.DeleteIngress(key, false) if deleteErr != nil { glog.Errorf("Error when deleting configuration for Ingress %v: %v", key, deleteErr) } @@ -1351,7 +1608,12 @@ func (lbc *LoadBalancerController) processChangesFromGlobalConfiguration(changes } } - updateErr := lbc.configurator.UpdateTransportServers(updatedTSExes, deletedKeys) + var updateErr error + updateErrs := lbc.configurator.UpdateTransportServers(updatedTSExes, deletedKeys) + + if len(updateErrs) > 0 { + updateErr = fmt.Errorf("errors received from updating TransportServers after GlobalConfiguration change: %v", updateErrs) + } lbc.updateResourcesStatusAndEvents(updatedResources, configs.Warnings{}, updateErr) diff --git a/internal/k8s/handlers.go b/internal/k8s/handlers.go index b98b373e07..7b07b7f69b 100644 --- a/internal/k8s/handlers.go +++ b/internal/k8s/handlers.go @@ -679,3 +679,37 @@ func createAppProtectDosProtectedResourceHandlers(lbc *LoadBalancerController) c } return handlers } + +// createNamespaceHandlers builds the handler funcs for namespaces +func createNamespaceHandlers(lbc *LoadBalancerController) cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + ns := obj.(*v1.Namespace) + glog.V(3).Infof("Adding Namespace to list of watched Namespaces: %v", ns.Name) + lbc.AddSyncQueue(obj) + }, + DeleteFunc: func(obj interface{}) { + ns, isNs := obj.(*v1.Namespace) + if !isNs { + deletedState, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + glog.V(3).Infof("Error received unexpected object: %v", obj) + return + } + ns, ok = deletedState.Obj.(*v1.Namespace) + if !ok { + glog.V(3).Infof("Error DeletedFinalStateUnknown contained non-Namespace object: %v", deletedState.Obj) + return + } + } + glog.V(3).Infof("Removing Namespace from list of watched Namespaces: %v", ns.Name) + lbc.AddSyncQueue(obj) + }, + UpdateFunc: func(old, cur interface{}) { + if !reflect.DeepEqual(old, cur) { + glog.V(3).Infof("Namespace %v changed, syncing", cur.(*v1.Namespace).Name) + lbc.AddSyncQueue(cur) + } + }, + } +} diff --git a/internal/k8s/task_queue.go b/internal/k8s/task_queue.go index 7406b4bbfd..502ef53576 100644 --- a/internal/k8s/task_queue.go +++ b/internal/k8s/task_queue.go @@ -114,6 +114,7 @@ const ( configMap secret service + namespace virtualserver virtualServerRoute globalConfiguration @@ -148,6 +149,8 @@ func newTask(key string, obj interface{}) (task, error) { k = secret case *v1.Service: k = service + case *v1.Namespace: + k = namespace case *conf_v1.VirtualServer: k = virtualserver case *conf_v1.VirtualServerRoute: diff --git a/tests/data/common/ns-patch.yaml b/tests/data/common/ns-patch.yaml new file mode 100644 index 0000000000..c926ca8f0d --- /dev/null +++ b/tests/data/common/ns-patch.yaml @@ -0,0 +1,3 @@ +metadata: + labels: + app: watch diff --git a/tests/data/watch-namespace/foreign-ns-virtual-server.yaml b/tests/data/watch-namespace/foreign-ns-virtual-server.yaml new file mode 100644 index 0000000000..62b8b76065 --- /dev/null +++ b/tests/data/watch-namespace/foreign-ns-virtual-server.yaml @@ -0,0 +1,20 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: foreign-vs +spec: + host: foreign.example.com + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/watch-namespace/watched-ns-virtual-server.yaml 
b/tests/data/watch-namespace/watched-ns-virtual-server.yaml new file mode 100644 index 0000000000..962cd4a2e2 --- /dev/null +++ b/tests/data/watch-namespace/watched-ns-virtual-server.yaml @@ -0,0 +1,20 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: watched-vs +spec: + host: watched.example.com + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/watch-namespace/watched-ns2-virtual-server.yaml b/tests/data/watch-namespace/watched-ns2-virtual-server.yaml new file mode 100644 index 0000000000..1505573be4 --- /dev/null +++ b/tests/data/watch-namespace/watched-ns2-virtual-server.yaml @@ -0,0 +1,20 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: watched-vs2 +spec: + host: watched2.example.com + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/suite/test_app_protect_watch_namespace_label.py b/tests/suite/test_app_protect_watch_namespace_label.py new file mode 100644 index 0000000000..5837c93fc2 --- /dev/null +++ b/tests/suite/test_app_protect_watch_namespace_label.py @@ -0,0 +1,217 @@ +import time + +import pytest +import requests +from settings import TEST_DATA +from suite.utils.ap_resources_utils import ( + create_ap_logconf_from_yaml, + create_ap_policy_from_yaml, + delete_ap_logconf, + delete_ap_policy, +) +from suite.utils.resources_utils import ( + create_example_app, + create_ingress_with_ap_annotations, + create_items_from_yaml, + create_namespace_with_name_from_yaml, + delete_common_app, + delete_items_from_yaml, + delete_namespace, + ensure_connection_to_public_endpoint, + ensure_response_from_backend, + patch_namespace_with_label, + wait_before_test, + wait_until_all_pods_are_ready, +) +from suite.utils.yaml_utils import get_first_ingress_host_from_yaml + +# This test shows that a policy outside of the namespace test_namespace is not picked up by IC. + +valid_resp_body = "Server name:" +invalid_resp_body = "The requested URL was rejected. Please consult with your administrator." +reload_times = {} + + +class BackendSetup: + """ + Encapsulate the example details. + + Attributes: + req_url (str): + ingress_host (str): + """ + + def __init__(self, req_url, req_url_2, metrics_url, ingress_host, test_namespace, policy_namespace): + self.req_url = req_url + self.req_url_2 = req_url_2 + self.metrics_url = metrics_url + self.ingress_host = ingress_host + self.test_namespace = test_namespace + self.policy_namespace = policy_namespace + + +@pytest.fixture(scope="class") +def backend_setup(request, kube_apis, ingress_controller_endpoint) -> BackendSetup: + """ + Deploy a simple application and AppProtect manifests. 
+ + :param request: pytest fixture + :param kube_apis: client apis + :param ingress_controller_endpoint: public endpoint + :param test_namespace: + :return: BackendSetup + """ + timestamp = round(time.time() * 1000) + test_namespace = f"test-namespace-{str(timestamp)}" + policy_namespace = f"policy-test-namespace-{str(timestamp)}" + policy = "file-block" + + create_namespace_with_name_from_yaml(kube_apis.v1, test_namespace, f"{TEST_DATA}/common/ns.yaml") + print("------------------------- Deploy backend application -------------------------") + + create_example_app(kube_apis, "simple", test_namespace) + req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend1" + req_url_2 = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend2" + metrics_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.metrics_port}/metrics" + wait_until_all_pods_are_ready(kube_apis.v1, test_namespace) + ensure_connection_to_public_endpoint( + ingress_controller_endpoint.public_ip, + ingress_controller_endpoint.port, + ingress_controller_endpoint.port_ssl, + ) + + print("------------------------- Deploy Secret -----------------------------") + src_sec_yaml = f"{TEST_DATA}/appprotect/appprotect-secret.yaml" + create_items_from_yaml(kube_apis, src_sec_yaml, test_namespace) + + print("------------------------- Deploy logconf -----------------------------") + src_log_yaml = f"{TEST_DATA}/appprotect/logconf.yaml" + log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace) + + print(f"------------------------- Deploy namespace: {policy_namespace} ---------------------------") + create_namespace_with_name_from_yaml(kube_apis.v1, policy_namespace, f"{TEST_DATA}/common/ns.yaml") + + print(f"------------------------- Deploy appolicy: {policy} ---------------------------") + src_pol_yaml = f"{TEST_DATA}/appprotect/{policy}.yaml" + pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, policy_namespace) + + print("------------------------- Deploy ingress -----------------------------") + ingress_host = {} + src_ing_yaml = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml" + create_ingress_with_ap_annotations( + kube_apis, src_ing_yaml, test_namespace, f"{policy_namespace}/{policy}", "True", "True", "127.0.0.1:514" + ) + ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml) + wait_before_test() + + def fin(): + if request.config.getoption("--skip-fixture-teardown") == "no": + print("Clean up:") + src_ing_yaml = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml" + delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace) + delete_ap_policy(kube_apis.custom_objects, pol_name, policy_namespace) + delete_namespace(kube_apis.v1, policy_namespace) + delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace) + delete_common_app(kube_apis, "simple", test_namespace) + src_sec_yaml = f"{TEST_DATA}/appprotect/appprotect-secret.yaml" + delete_items_from_yaml(kube_apis, src_sec_yaml, test_namespace) + delete_namespace(kube_apis.v1, test_namespace) + + request.addfinalizer(fin) + + return BackendSetup(req_url, req_url_2, metrics_url, ingress_host, test_namespace, policy_namespace) + + +@pytest.mark.skip_for_nginx_oss +@pytest.mark.appprotect +@pytest.mark.parametrize( + "crd_ingress_controller_with_ap", + [ + { + "extra_args": [ + f"-enable-custom-resources", + f"-enable-app-protect", + f"-enable-prometheus-metrics", + 
f"-watch-namespace-label=app=watch", + f"-v=3", + ] + } + ], + indirect=True, +) +class TestAppProtectWatchNamespaceLabelEnabled: + def test_responses(self, request, kube_apis, crd_ingress_controller_with_ap, backend_setup): + """ + Test file-block AppProtect policy with -watch-namespace-label + """ + patch_namespace_with_label( + kube_apis.v1, backend_setup.test_namespace, "watch", f"{TEST_DATA}/common/ns-patch.yaml" + ) + wait_before_test() + print("------------- Run test for AP policy: file-block not enforced --------------") + # The policy namespace does not have the watched label, show the policy is not enforced + print(f"Request URL: {backend_setup.req_url} and Host: {backend_setup.ingress_host}") + + ensure_response_from_backend(backend_setup.req_url, backend_setup.ingress_host, check404=True) + + print("----------------------- Send request ----------------------") + resp = requests.get( + f"{backend_setup.req_url}/test.bat", headers={"host": backend_setup.ingress_host}, verify=False + ) + + print(resp.text) + + assert valid_resp_body in resp.text + assert resp.status_code == 200 + + # Add the label to the policy namespace, show the policy is now enforced + patch_namespace_with_label( + kube_apis.v1, backend_setup.policy_namespace, "watch", f"{TEST_DATA}/common/ns-patch.yaml" + ) + wait_before_test(15) + print("------------- Run test for AP policy: file-block is enforced now --------------") + print(f"Request URL: {backend_setup.req_url} and Host: {backend_setup.ingress_host}") + + ensure_response_from_backend(backend_setup.req_url, backend_setup.ingress_host, check404=True) + + print("----------------------- Send request ----------------------") + resp = requests.get( + f"{backend_setup.req_url}/test.bat", headers={"host": backend_setup.ingress_host}, verify=False + ) + retry = 0 + while invalid_resp_body not in resp.text and retry <= 60: + resp = requests.get( + f"{backend_setup.req_url}/test.bat", headers={"host": backend_setup.ingress_host}, verify=False + ) + retry += 1 + wait_before_test(1) + print(f"Policy not yet enforced, retrying... #{retry}") + + assert invalid_resp_body in resp.text + assert resp.status_code == 200 + + # Remove the label again fro the policy namespace, show the policy is not enforced again + patch_namespace_with_label( + kube_apis.v1, backend_setup.policy_namespace, "nowatch", f"{TEST_DATA}/common/ns-patch.yaml" + ) + wait_before_test(15) + print("------------- Run test for AP policy: file-block not enforced again --------------") + print(f"Request URL: {backend_setup.req_url} and Host: {backend_setup.ingress_host}") + + ensure_response_from_backend(backend_setup.req_url, backend_setup.ingress_host, check404=True) + + print("----------------------- Send request ----------------------") + resp = requests.get( + f"{backend_setup.req_url}/test.bat", headers={"host": backend_setup.ingress_host}, verify=False + ) + retry = 0 + while valid_resp_body not in resp.text and retry <= 60: + resp = requests.get( + f"{backend_setup.req_url}/test.bat", headers={"host": backend_setup.ingress_host}, verify=False + ) + retry += 1 + wait_before_test(1) + print(f"Policy not yet removed, retrying... 
#{retry}") + + assert valid_resp_body in resp.text + assert resp.status_code == 200 diff --git a/tests/suite/test_virtual_server_certmanager.py b/tests/suite/test_virtual_server_certmanager.py index 87a4447be0..aabcf4582f 100644 --- a/tests/suite/test_virtual_server_certmanager.py +++ b/tests/suite/test_virtual_server_certmanager.py @@ -1,7 +1,12 @@ import pytest from settings import TEST_DATA from suite.utils.custom_assertions import wait_and_assert_status_code -from suite.utils.resources_utils import create_secret_from_yaml, is_secret_present, wait_before_test +from suite.utils.resources_utils import ( + create_secret_from_yaml, + is_secret_present, + patch_namespace_with_label, + wait_before_test, +) from suite.utils.vs_vsr_resources_utils import patch_virtual_server_from_yaml from suite.utils.yaml_utils import get_secret_name_from_vs_yaml @@ -94,3 +99,53 @@ def test_virtual_server_no_cm(self, kube_apis, crd_ingress_controller, create_ce ) wait_and_assert_status_code(200, virtual_server_setup.backend_1_url, virtual_server_setup.vs_host) wait_and_assert_status_code(200, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host) + + +@pytest.mark.vs +@pytest.mark.smoke +@pytest.mark.parametrize( + "crd_ingress_controller, create_certmanager, virtual_server_setup", + [ + ( + { + "type": "complete", + "extra_args": [ + f"-enable-custom-resources", + f"-enable-cert-manager", + f"-watch-namespace-label=app=watch", + ], + }, + {"issuer_name": "self-signed"}, + {"example": "virtual-server-certmanager", "app_type": "simple"}, + ) + ], + indirect=True, +) +class TestCertManagerVirtualServerWatchLabel: + def test_responses_after_setup( + self, kube_apis, crd_ingress_controller, create_certmanager, virtual_server_setup, test_namespace + ): + print("\nStep 1: Not watching namespace - verify secret does not exist") + secret_name = get_secret_name_from_vs_yaml( + f"{TEST_DATA}/virtual-server-certmanager/standard/virtual-server.yaml" + ) + # add a wait to avoid a false negative + wait_before_test(10) + check = is_secret_present(kube_apis.v1, secret_name, virtual_server_setup.namespace) + assert check == False + + print("\nStep 2: Add label to namespace - Verify secret exists now") + patch_namespace_with_label(kube_apis.v1, test_namespace, "watch", f"{TEST_DATA}/common/ns-patch.yaml") + wait_before_test() + secret_name = get_secret_name_from_vs_yaml( + f"{TEST_DATA}/virtual-server-certmanager/standard/virtual-server.yaml" + ) + retry = 0 + while (not is_secret_present(kube_apis.v1, secret_name, virtual_server_setup.namespace)) and retry <= 10: + wait_before_test(1) + retry += 1 + print(f"Retrying {retry}") + + print("\nStep 2: verify connectivity") + wait_and_assert_status_code(200, virtual_server_setup.backend_1_url, virtual_server_setup.vs_host) + wait_and_assert_status_code(200, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host) diff --git a/tests/suite/test_virtual_server_externaldns.py b/tests/suite/test_virtual_server_externaldns.py index 7830b84631..dc1382d502 100644 --- a/tests/suite/test_virtual_server_externaldns.py +++ b/tests/suite/test_virtual_server_externaldns.py @@ -2,7 +2,7 @@ from settings import TEST_DATA from suite.utils.custom_assertions import assert_event from suite.utils.custom_resources_utils import is_dnsendpoint_present -from suite.utils.resources_utils import get_events, wait_before_test +from suite.utils.resources_utils import get_events, patch_namespace_with_label, wait_before_test from suite.utils.vs_vsr_resources_utils import 
patch_virtual_server_from_yaml from suite.utils.yaml_utils import get_name_from_yaml, get_namespace_from_yaml @@ -65,3 +65,60 @@ def test_update_to_ed_in_vs( wait_before_test(5) events = get_events(kube_apis.v1, virtual_server_setup.namespace) assert_event(vs_event_update_text, events) + + +@pytest.mark.vs +@pytest.mark.smoke +@pytest.mark.parametrize( + "crd_ingress_controller_with_ed, create_externaldns, virtual_server_setup", + [ + ( + { + "type": "complete", + "extra_args": [ + f"-enable-custom-resources", + f"-enable-external-dns", + f"-watch-namespace-label=app=watch", + ], + }, + {}, + {"example": "virtual-server-external-dns", "app_type": "simple"}, + ) + ], + indirect=True, +) +class TestExternalDNSVirtualServerWatchLabel: + def test_responses_after_setup( + self, kube_apis, crd_ingress_controller_with_ed, create_externaldns, virtual_server_setup, test_namespace + ): + dns_ep_name = get_name_from_yaml(VS_YAML) + print("\nStep 0: Verify DNSEndpoint is not created without watched label") + retry = 0 + dep = is_dnsendpoint_present(kube_apis.custom_objects, dns_ep_name, virtual_server_setup.namespace) + # add a wait to avoid a false negative + wait_before_test(30) + dep = is_dnsendpoint_present(kube_apis.custom_objects, dns_ep_name, virtual_server_setup.namespace) + assert dep is False + print("\nStep 1: Verify DNSEndpoint exists after label is added to namespace") + patch_namespace_with_label(kube_apis.v1, test_namespace, "watch", f"{TEST_DATA}/common/ns-patch.yaml") + wait_before_test() + retry = 0 + dep = is_dnsendpoint_present(kube_apis.custom_objects, dns_ep_name, virtual_server_setup.namespace) + while dep == False and retry <= 60: + dep = is_dnsendpoint_present(kube_apis.custom_objects, dns_ep_name, virtual_server_setup.namespace) + retry += 1 + wait_before_test(1) + print(f"DNSEndpoint not created, retrying... #{retry}") + assert dep is True + print("\nStep 2: Verify external-dns picked up the record") + pod_ns = get_namespace_from_yaml(f"{TEST_DATA}/virtual-server-external-dns/external-dns.yaml") + pod_name = kube_apis.v1.list_namespaced_pod(pod_ns).items[0].metadata.name + log_contents = kube_apis.v1.read_namespaced_pod_log(pod_name, pod_ns) + wanted_string = "CREATE: virtual-server.example.com 0 IN A" + retry = 0 + while wanted_string not in log_contents and retry <= 60: + log_contents = kube_apis.v1.read_namespaced_pod_log(pod_name, pod_ns) + retry += 1 + wait_before_test(1) + print(f"External DNS not updated, retrying... #{retry}") + assert wanted_string in log_contents diff --git a/tests/suite/test_watch_namespace_label.py b/tests/suite/test_watch_namespace_label.py new file mode 100644 index 0000000000..2b80c2aff4 --- /dev/null +++ b/tests/suite/test_watch_namespace_label.py @@ -0,0 +1,232 @@ +from unittest import mock + +import pytest +import requests +from settings import TEST_DATA +from suite.utils.resources_utils import ( + create_example_app, + create_items_from_yaml, + create_namespace_with_name_from_yaml, + delete_namespace, + ensure_connection_to_public_endpoint, + ensure_response_from_backend, + patch_namespace_with_label, + wait_before_test, + wait_until_all_pods_are_ready, +) +from suite.utils.vs_vsr_resources_utils import create_virtual_server_from_yaml +from suite.utils.yaml_utils import get_first_host_from_yaml, get_first_ingress_host_from_yaml + + +class BackendSetup: + """ + Encapsulate the example details. 
+ + Attributes: + req_url (str): + resource_hosts (dict): + """ + + def __init__(self, req_url, resource_hosts): + self.req_url = req_url + self.resource_hosts = resource_hosts + + +@pytest.fixture(scope="class") +def backend_setup(request, kube_apis, ingress_controller_endpoint) -> BackendSetup: + """ + Create 3 namespaces and deploy simple applications in them. + + :param request: pytest fixture + :param kube_apis: client apis + :param ingress_controller_endpoint: public endpoint + :return: BackendSetup + """ + resource_hosts = {} + namespaces = [] + for ns in ["watched-ns", "foreign-ns", "watched-ns2"]: + namespace, ingress_host = create_and_setup_namespace(kube_apis, ingress_controller_endpoint, ns) + resource_hosts[f"{ns}-ingress"] = ingress_host + namespaces.append(namespace) + + req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}/backend1" + # add label to namespaces + patch_namespace_with_label(kube_apis.v1, "watched-ns", "watch", f"{TEST_DATA}/common/ns-patch.yaml") + patch_namespace_with_label(kube_apis.v1, "watched-ns2", "watch", f"{TEST_DATA}/common/ns-patch.yaml") + + def fin(): + if request.config.getoption("--skip-fixture-teardown") == "no": + print("Clean up:") + for ns in namespaces: + delete_namespace(kube_apis.v1, ns) + + request.addfinalizer(fin) + + return BackendSetup(req_url, resource_hosts) + + +@pytest.fixture(scope="class") +def backend_setup_vs(request, kube_apis, ingress_controller_endpoint) -> BackendSetup: + """ + Create 3 namespaces and deploy simple applications in them. + + :param request: pytest fixture + :param kube_apis: client apis + :param ingress_controller_endpoint: public endpoint + :return: BackendSetup + """ + resource_hosts = {} + namespaces = [] + for ns in ["watched-ns", "foreign-ns", "watched-ns2"]: + namespace, vs_host = create_and_setup_namespace(kube_apis, ingress_controller_endpoint, ns, is_vs=True) + resource_hosts[f"{ns}-vs"] = vs_host + namespaces.append(namespace) + + req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}/backend1" + # add label to namespaces + patch_namespace_with_label(kube_apis.v1, "watched-ns", "watch", f"{TEST_DATA}/common/ns-patch.yaml") + patch_namespace_with_label(kube_apis.v1, "watched-ns2", "watch", f"{TEST_DATA}/common/ns-patch.yaml") + + def fin(): + if request.config.getoption("--skip-fixture-teardown") == "no": + print("Clean up:") + for ns in namespaces: + delete_namespace(kube_apis.v1, ns) + + request.addfinalizer(fin) + + return BackendSetup(req_url, resource_hosts) + + +def create_and_setup_namespace(kube_apis, ingress_controller_endpoint, ns_name, is_vs=False): + ns = create_namespace_with_name_from_yaml(kube_apis.v1, ns_name, f"{TEST_DATA}/common/ns.yaml") + print(f"------------------------- Deploy the backend in {ns} -----------------------------------") + create_example_app(kube_apis, "simple", ns) + src_ing_yaml = f"{TEST_DATA}/watch-namespace/{ns}-ingress.yaml" + src_vs_yaml = f"{TEST_DATA}/watch-namespace/{ns}-virtual-server.yaml" + if not is_vs: + create_items_from_yaml(kube_apis, src_ing_yaml, ns) + ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml) + if is_vs: + create_virtual_server_from_yaml(kube_apis.custom_objects, src_vs_yaml, ns) + ingress_host = get_first_host_from_yaml(src_vs_yaml) + req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}/backend1" + wait_until_all_pods_are_ready(kube_apis.v1, ns) + ensure_connection_to_public_endpoint( + 
ingress_controller_endpoint.public_ip,
+        ingress_controller_endpoint.port,
+        ingress_controller_endpoint.port_ssl,
+    )
+    return ns, ingress_host
+
+
+@pytest.mark.ingresses
+@pytest.mark.parametrize(
+    "ingress_controller, expected_responses",
+    [
+        pytest.param(
+            {"extra_args": ["-watch-namespace-label=app=watch"]},
+            {"watched-ns-ingress": 200, "watched-ns2-ingress": 200, "foreign-ns-ingress": 404},
+        )
+    ],
+    indirect=["ingress_controller"],
+)
+class TestWatchNamespaceLabelIngress:
+    def test_response_codes(self, kube_apis, ingress_controller, backend_setup, expected_responses):
+        for ing in ["watched-ns-ingress", "watched-ns2-ingress", "foreign-ns-ingress"]:
+            ensure_response_from_backend(backend_setup.req_url, backend_setup.resource_hosts[ing])
+            resp = mock.Mock()
+            resp.status_code = "None"
+            retry = 0
+            while resp.status_code != expected_responses[ing] and retry < 3:
+                resp = requests.get(backend_setup.req_url, headers={"host": backend_setup.resource_hosts[ing]})
+                retry += 1
+                wait_before_test()
+            assert (
+                resp.status_code == expected_responses[ing]
+            ), f"Expected: {expected_responses[ing]} response code for {backend_setup.resource_hosts[ing]}"
+
+        # Add label to foreign-ns-ingress and show traffic being served
+        patch_namespace_with_label(kube_apis.v1, "foreign-ns", "watch", f"{TEST_DATA}/common/ns-patch.yaml")
+        ensure_response_from_backend(backend_setup.req_url, backend_setup.resource_hosts[ing])
+        resp = mock.Mock()
+        resp.status_code = "None"
+        retry = 0
+        ing = "foreign-ns-ingress"
+        while resp.status_code != 200 and retry < 3:
+            resp = requests.get(backend_setup.req_url, headers={"host": backend_setup.resource_hosts[ing]})
+            retry += 1
+            wait_before_test()
+        assert (
+            resp.status_code == 200
+        ), f"Expected: 200 response code for {backend_setup.resource_hosts[ing]} after adding the correct label"
+
+        # Remove label from foreign-ns-ingress and show traffic being ignored again
+        patch_namespace_with_label(kube_apis.v1, "foreign-ns", "nowatch", f"{TEST_DATA}/common/ns-patch.yaml")
+        ensure_response_from_backend(backend_setup.req_url, backend_setup.resource_hosts[ing])
+        resp = mock.Mock()
+        resp.status_code = "None"
+        retry = 0
+        while resp.status_code != expected_responses[ing] and retry < 3:
+            resp = requests.get(backend_setup.req_url, headers={"host": backend_setup.resource_hosts[ing]})
+            retry += 1
+            wait_before_test()
+        assert (
+            resp.status_code == expected_responses[ing]
+        ), f"Expected: {expected_responses[ing]} response code for {backend_setup.resource_hosts[ing]} after removing the watched label"
+
+
+@pytest.mark.vs
+@pytest.mark.parametrize(
+    "crd_ingress_controller, expected_responses",
+    [
+        pytest.param(
+            {"type": "complete", "extra_args": ["-watch-namespace-label=app=watch", "-enable-custom-resources=true"]},
+            {"watched-ns-vs": 200, "watched-ns2-vs": 200, "foreign-ns-vs": 404},
+        )
+    ],
+    indirect=["crd_ingress_controller"],
+)
+class TestWatchNamespaceLabelVS:
+    def test_response_codes(self, kube_apis, crd_ingress_controller, backend_setup_vs, expected_responses):
+        for vs in ["watched-ns-vs", "watched-ns2-vs", "foreign-ns-vs"]:
+            ensure_response_from_backend(backend_setup_vs.req_url, backend_setup_vs.resource_hosts[vs])
+            resp = mock.Mock()
+            resp.status_code = "None"
+            retry = 0
+            while resp.status_code != expected_responses[vs] and retry < 3:
+                resp = requests.get(backend_setup_vs.req_url, headers={"host": backend_setup_vs.resource_hosts[vs]})
+                retry += 1
+                wait_before_test()
+            assert (
+                resp.status_code == expected_responses[vs]
+            ), f"Expected: {expected_responses[vs]} response code for {backend_setup_vs.resource_hosts[vs]}"
+
+        # Add label to foreign-ns-vs and show traffic being served
+        patch_namespace_with_label(kube_apis.v1, "foreign-ns", "watch", f"{TEST_DATA}/common/ns-patch.yaml")
+        ensure_response_from_backend(backend_setup_vs.req_url, backend_setup_vs.resource_hosts[vs])
+        resp = mock.Mock()
+        resp.status_code = "None"
+        retry = 0
+        vs = "foreign-ns-vs"
+        while resp.status_code != 200 and retry < 3:
+            resp = requests.get(backend_setup_vs.req_url, headers={"host": backend_setup_vs.resource_hosts[vs]})
+            retry += 1
+            wait_before_test()
+        assert (
+            resp.status_code == 200
+        ), f"Expected: 200 response code for {backend_setup_vs.resource_hosts[vs]} after adding the correct label"
+
+        # Remove label from foreign-ns-vs and show traffic being ignored again
+        patch_namespace_with_label(kube_apis.v1, "foreign-ns", "nowatch", f"{TEST_DATA}/common/ns-patch.yaml")
+        ensure_response_from_backend(backend_setup_vs.req_url, backend_setup_vs.resource_hosts[vs])
+        resp = mock.Mock()
+        resp.status_code = "None"
+        retry = 0
+        while resp.status_code != expected_responses[vs] and retry < 3:
+            resp = requests.get(backend_setup_vs.req_url, headers={"host": backend_setup_vs.resource_hosts[vs]})
+            retry += 1
+            wait_before_test()
+        assert (
+            resp.status_code == expected_responses[vs]
+        ), f"Expected: {expected_responses[vs]} response code for {backend_setup_vs.resource_hosts[vs]} after removing the watched label"
diff --git a/tests/suite/utils/resources_utils.py b/tests/suite/utils/resources_utils.py
index 692ef4e058..e88cfe225e 100644
--- a/tests/suite/utils/resources_utils.py
+++ b/tests/suite/utils/resources_utils.py
@@ -707,6 +707,24 @@ def create_namespace_with_name_from_yaml(v1: CoreV1Api, name, yaml_manifest) ->
     return dep["metadata"]["name"]
+
+def patch_namespace_with_label(v1: CoreV1Api, name, label, yaml_manifest) -> None:
+    """
+    Update a namespace with a specific label based on a yaml manifest.
+
+    :param v1: CoreV1Api
+    :param name: namespace name
+    :param label: the value to set for the "app" label
+    :param yaml_manifest: an absolute path to file
+    :return: None
+    """
+    print(f"Update namespace {name} with label app={label}")
+    with open(yaml_manifest) as f:
+        dep = yaml.safe_load(f)
+        dep["metadata"]["labels"]["app"] = label
+        v1.patch_namespace(name, dep)
+    print(f"Namespace {name} updated with label: {label}")
+
+
 def create_service_account(v1: CoreV1Api, namespace, body) -> None:
     """
     Create a ServiceAccount based on a dict.