From 385bb13f3882a7a214e168c106417461623dc39d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 9 Nov 2022 11:51:31 +0000 Subject: [PATCH 01/30] Generate working VS config using EndpointSlices --- internal/k8s/configuration.go | 20 +++-- internal/k8s/controller.go | 156 ++++++++++++++++++++++++++++++++++ internal/k8s/handlers.go | 34 ++++++++ internal/k8s/task_queue.go | 4 + internal/k8s/utils.go | 34 ++++++++ 5 files changed, 241 insertions(+), 7 deletions(-) diff --git a/internal/k8s/configuration.go b/internal/k8s/configuration.go index fd4c78bebc..303fe566fb 100644 --- a/internal/k8s/configuration.go +++ b/internal/k8s/configuration.go @@ -344,13 +344,14 @@ type Configuration struct { globalConfigurationValidator *validation.GlobalConfigurationValidator transportServerValidator *validation.TransportServerValidator - secretReferenceChecker *secretReferenceChecker - serviceReferenceChecker *serviceReferenceChecker - endpointReferenceChecker *serviceReferenceChecker - policyReferenceChecker *policyReferenceChecker - appPolicyReferenceChecker *appProtectResourceReferenceChecker - appLogConfReferenceChecker *appProtectResourceReferenceChecker - appDosProtectedChecker *dosResourceReferenceChecker + secretReferenceChecker *secretReferenceChecker + serviceReferenceChecker *serviceReferenceChecker + endpointReferenceChecker *serviceReferenceChecker + endpointSliceReferenceChecker *serviceReferenceChecker //New code + policyReferenceChecker *policyReferenceChecker + appPolicyReferenceChecker *appProtectResourceReferenceChecker + appLogConfReferenceChecker *appProtectResourceReferenceChecker + appDosProtectedChecker *dosResourceReferenceChecker isPlus bool appProtectEnabled bool @@ -855,6 +856,11 @@ func (c *Configuration) FindResourcesForEndpoints(endpointsNamespace string, end return c.findResourcesForResourceReference(endpointsNamespace, endpointsName, c.endpointReferenceChecker) } +// New Code +func (c 
*Configuration) FindResourcesForEndpointSlices(endpointSlicesNamespace string, endpointSliceName string) []Resource { + return c.findResourcesForResourceReference(endpointSlicesNamespace, endpointSliceName, c.endpointSliceReferenceChecker) +} + // FindResourcesForSecret finds resources that reference the specified secret. func (c *Configuration) FindResourcesForSecret(secretNamespace string, secretName string) []Resource { return c.findResourcesForResourceReference(secretNamespace, secretName, c.secretReferenceChecker) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index cf5a9d4e5b..3174b1b505 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -19,6 +19,7 @@ package k8s import ( "context" "fmt" + discovery_v1 "k8s.io/api/discovery/v1" "net" "strconv" "strings" @@ -115,6 +116,7 @@ type LoadBalancerController struct { ingressLister []storeToIngressLister svcLister []cache.Store endpointLister []storeToEndpointLister + endpointSliceLister []storeToEndpointSliceLister configMapLister storeToConfigMapLister podLister []indexerToPodLister secretLister []cache.Store @@ -282,6 +284,7 @@ func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalanc lbc.addIngressHandler(createIngressHandlers(lbc)) lbc.addServiceHandler(createServiceHandlers(lbc)) lbc.addEndpointHandler(createEndpointHandlers(lbc)) + lbc.addEndpointSliceHandlerWithStore(createEndpointSliceHandlers(lbc)) //New code lbc.addPodHandler() secretsTweakListOptionsFunc := func(options *meta_v1.ListOptions) { @@ -519,6 +522,36 @@ func (lbc *LoadBalancerController) addEndpointHandler(handlers cache.ResourceEve } } +// addEndpointSliceHandlerWithStore adds the handler for EndpointSlices to the controller +func (lbc *LoadBalancerController) addEndpointSliceHandlerWithStore(handlers cache.ResourceEventHandlerFuncs) { + for _, sif := range lbc.sharedInformerFactory { + informer := sif.Discovery().V1().EndpointSlices().Informer() + 
informer.AddEventHandler(handlers) + var el storeToEndpointSliceLister + el.Store = informer.GetStore() + lbc.endpointSliceLister = append(lbc.endpointSliceLister, el) + + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) + } +} + +// addEndpointSliceHandlerWithIndexer adds the handler for EndpointSlices to the controller +//func (lbc *LoadBalancerController) addEndpointSliceHandlerWithIndexer(handlers cache.ResourceEventHandlerFuncs) { +// for _, sif := range lbc.sharedInformerFactory { +// informer := sif.Discovery().V1().EndpointSlices().Informer() +// indexerErr := informer.AddIndexers("metadata/label....") +// if indexerErr != nil { +// glog.Errorf("unable to add indexer %s for EndpointSlices", "the indexer") +// } +// informer.AddEventHandler(handlers) +// var el indexerToEndpointSliceLister +// el.Indexer = informer.GetIndexer().ByIndex("metadata/label...") +// lbc.endpointSliceLister = append(lbc.endpointSliceLister, el) +// +// lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) +// } +//} + // addConfigMapHandler adds the handler for config maps to the controller func (lbc *LoadBalancerController) addConfigMapHandler(handlers cache.ResourceEventHandlerFuncs, namespace string) { lbc.configMapLister.Store, lbc.configMapController = cache.NewInformer( @@ -745,6 +778,55 @@ func (lbc *LoadBalancerController) syncEndpoints(task task) { } } +func (lbc *LoadBalancerController) syncEndpointSlices(task task) { + key := task.Key + var obj interface{} + var endpointSliceExists bool + var err error + glog.V(3).Infof("Syncing EndpointSlices %v", key) + + for _, el := range lbc.endpointSliceLister { + obj, endpointSliceExists, err = el.GetByKey(key) + if endpointSliceExists { + break + } + } + + if err != nil { + lbc.syncQueue.Requeue(task, err) + return + } + + if !endpointSliceExists { + return + } + + endpointSlice := obj.(*discovery_v1.EndpointSlice) + //resources := lbc.configuration.FindResourcesForEndpointSlices(endpointSlice.Namespace, 
endpointSlice.Name) + // Find EndpointSlices based on service name. This is mapped for the label `kubernetes.io/service-name`. + svcResource := lbc.configuration.FindResourcesForService(endpointSlice.Namespace, endpointSlice.Labels["kubernetes.io/service-name"]) + + resourceExes := lbc.createExtendedResources(svcResource) + + if lbc.areCustomResourcesEnabled { + if len(resourceExes.VirtualServerExes) > 0 { + glog.V(3).Infof("Updating EndpointSlices for %v", resourceExes.VirtualServerExes) + err := lbc.configurator.UpdateEndpointsForVirtualServers(resourceExes.VirtualServerExes) + if err != nil { + glog.Errorf("Error updating EndpointSlices for %v: %v", resourceExes.VirtualServerExes, err) + } + } + + if len(resourceExes.TransportServerExes) > 0 { + glog.V(3).Infof("Updating EndpointSlices for %v", resourceExes.TransportServerExes) + err := lbc.configurator.UpdateEndpointsForTransportServers(resourceExes.TransportServerExes) + if err != nil { + glog.Errorf("Error updating EndpointSlices for %v: %v", resourceExes.TransportServerExes, err) + } + } + } +} + func (lbc *LoadBalancerController) createExtendedResources(resources []Resource) configs.ExtendedResources { var result configs.ExtendedResources @@ -893,6 +975,8 @@ func (lbc *LoadBalancerController) sync(task task) { lbc.syncConfigMap(task) case endpoints: lbc.syncEndpoints(task) + case endpointslice: + lbc.syncEndpointSlices(task) case secret: lbc.syncSecret(task) case service: @@ -3435,6 +3519,7 @@ func (lbc *LoadBalancerController) getExternalEndpointsForIngressBackend(backend } func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networking.IngressBackend, svc *api_v1.Service) (result []podEndpoint, isExternal bool, err error) { + // Get Endpoints from api_v1.Endpoints var endps api_v1.Endpoints for _, el := range lbc.endpointLister { endps, err = el.GetServiceEndpoints(svc) @@ -3442,6 +3527,19 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ break } } + 
+ // New code + // Get Endpoints from discovery_v1.EndpointSlice + var endpointSlices discovery_v1.EndpointSlice + for _, el := range lbc.endpointSliceLister { + endpointSlices, err = el.GetServiceEndpointSlices(svc) + if err == nil { + break + } + } + + glog.V(3).Infof("Found EndpointSlices %v", endpointSlices) + if err != nil { if svc.Spec.Type == api_v1.ServiceTypeExternalName { if !lbc.isNginxPlus { @@ -3454,6 +3552,17 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ return nil, false, err } + //New code + //Check if our endpointslices object has endpoints in it. + if len(endpointSlices.Endpoints) >= 1 { + result, err = lbc.getEndpointsForPortFromEndpointSlice(endpointSlices, backend.Service.Port, svc) + + if err != nil { + return nil, false, err + } + return result, false, nil + } + result, err = lbc.getEndpointsForPort(endps, backend.Service.Port, svc) if err != nil { glog.V(3).Infof("Error getting endpoints for service %s port %v: %v", svc.Name, configs.GetBackendPortAsString(backend.Service.Port), err) @@ -3462,6 +3571,53 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ return result, false, nil } +func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlice(endpointSlice discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { + var targetPort int32 + var err error + + for _, port := range svc.Spec.Ports { + if (backendPort.Name == "" && port.Port == backendPort.Number) || port.Name == backendPort.Name { + targetPort, err = lbc.getTargetPort(port, svc) + if err != nil { + return nil, fmt.Errorf("error determining target port for port %v in Ingress: %w", backendPort, err) + } + break + } + } + + if targetPort == 0 { + return nil, fmt.Errorf("no port %v in svc %s", backendPort, svc.Name) + } + + // Check if the port referenced in the EndpointSlice is the same as + // the target port + for _, endpointSlicePort := range 
endpointSlice.Ports { + if *endpointSlicePort.Port == targetPort { + fmt.Println("Nice!") + var endpoints []podEndpoint + for _, endpoint := range endpointSlice.Endpoints { + for _, endpointAddress := range endpoint.Addresses { + address := ipv6SafeAddrPort(endpointAddress, *endpointSlicePort.Port) + podEndpoint := podEndpoint{ + Address: address, + } + if endpoint.TargetRef != nil { + parentType, parentName := + lbc.getPodOwnerTypeAndNameFromAddress(endpoint.TargetRef.Namespace, endpoint.TargetRef.Name) + podEndpoint.OwnerType = parentType + podEndpoint.OwnerName = parentName + podEndpoint.PodName = endpoint.TargetRef.Name + } + endpoints = append(endpoints, podEndpoint) + } + } + return endpoints, nil + } + } + + return nil, fmt.Errorf("no endpoints for target port %v in service %s", targetPort, svc.Name) +} + func (lbc *LoadBalancerController) getEndpointsForPort(endps api_v1.Endpoints, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { var targetPort int32 var err error diff --git a/internal/k8s/handlers.go b/internal/k8s/handlers.go index 5cb40e271a..d756ca2626 100644 --- a/internal/k8s/handlers.go +++ b/internal/k8s/handlers.go @@ -2,6 +2,7 @@ package k8s import ( "fmt" + discovery_v1 "k8s.io/api/discovery/v1" "reflect" "sort" @@ -94,6 +95,39 @@ func createEndpointHandlers(lbc *LoadBalancerController) cache.ResourceEventHand } } +// createEndpointSliceHandlers builds the handler funcs for EndpointSlices +func createEndpointSliceHandlers(lbc *LoadBalancerController) cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + endpointSlice := obj.(*discovery_v1.EndpointSlice) + glog.V(3).Infof("Adding EndpointSlice: %v", endpointSlice.Name) + lbc.AddSyncQueue(obj) + }, + DeleteFunc: func(obj interface{}) { + endpointSlice, isEndpointSlice := obj.(*discovery_v1.EndpointSlice) + if !isEndpointSlice { + deletedState, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + 
glog.V(3).Infof("Error received unexpected object: %v", obj) + return + } + endpointSlice, ok = deletedState.Obj.(*discovery_v1.EndpointSlice) + if !ok { + glog.V(3).Infof("Error DeletedFinalStateUnknown contained non-EndpointSlice object: %v", deletedState.Obj) + return + } + } + glog.V(3).Infof("Removing EndpointSlice: %v", endpointSlice.Name) + lbc.AddSyncQueue(obj) + }, UpdateFunc: func(old, cur interface{}) { + if !reflect.DeepEqual(old, cur) { + glog.V(3).Infof("EndpointSlice %v changed, syncing", cur.(*discovery_v1.EndpointSlice).Name) + lbc.AddSyncQueue(cur) + } + }, + } +} + // createIngressHandlers builds the handler funcs for ingresses func createIngressHandlers(lbc *LoadBalancerController) cache.ResourceEventHandlerFuncs { return cache.ResourceEventHandlerFuncs{ diff --git a/internal/k8s/task_queue.go b/internal/k8s/task_queue.go index 021b415ed4..d96541956e 100644 --- a/internal/k8s/task_queue.go +++ b/internal/k8s/task_queue.go @@ -12,6 +12,7 @@ import ( conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" v1 "k8s.io/api/core/v1" + discovery_v1 "k8s.io/api/discovery/v1" networking "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/wait" @@ -110,6 +111,7 @@ type kind int const ( ingress = iota endpoints + endpointslice configMap secret service @@ -141,6 +143,8 @@ func newTask(key string, obj interface{}) (task, error) { k = ingress case *v1.Endpoints: k = endpoints + case *discovery_v1.EndpointSlice: + k = endpointslice case *v1.ConfigMap: k = configMap case *v1.Secret: diff --git a/internal/k8s/utils.go b/internal/k8s/utils.go index e6cc57fecf..c1c4ef4e97 100644 --- a/internal/k8s/utils.go +++ b/internal/k8s/utils.go @@ -18,6 +18,7 @@ package k8s import ( "fmt" + discovery_v1 "k8s.io/api/discovery/v1" "reflect" "strings" @@ -87,6 +88,16 @@ type storeToEndpointLister 
struct { cache.Store } +// Store for EndpointSlices +type storeToEndpointSliceLister struct { + cache.Store +} + +// Indexer for EndpointSlices +type indexerToEndpointSliceLister struct { + cache.Indexer +} + // GetServiceEndpoints returns the endpoints of a service, matched on service name. func (s *storeToEndpointLister) GetServiceEndpoints(svc *v1.Service) (ep v1.Endpoints, err error) { for _, m := range s.Store.List() { @@ -98,6 +109,29 @@ func (s *storeToEndpointLister) GetServiceEndpoints(svc *v1.Service) (ep v1.Endp return ep, fmt.Errorf("could not find endpoints for service: %v", svc.Name) } +// GetServiceEndpoints returns the endpoints of a service, matched on service name. +func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) (ep discovery_v1.EndpointSlice, err error) { + + // -- Code to be used when Indexer is set up + //svcNameKey := fmt.Sprintf("%s/%s", svc.Namespace, svc.Name) + //epStore, exists, err := s.Store.GetByKey(svcNameKey) + //ep = *epStore.(*discovery_v1.EndpointSlice) + //if !exists { + // + //} + //if err != nil { + // + //} + + for _, epStore := range s.Store.List() { + ep = *epStore.(*discovery_v1.EndpointSlice) + if svc.Name == ep.Labels["kubernetes.io/service-name"] && svc.Namespace == ep.Namespace { + return ep, nil + } + } + return ep, fmt.Errorf("could not find endpoints for service: %v", svc.Name) +} + // findPort locates the container port for the given pod and portName. If the // targetPort is a number, use that. If the targetPort is a string, look that // string up in all named ports in all containers in the target pod. 
If no From 9b3882baa939f480b4708bb495f1d6ccc42c339e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 9 Nov 2022 16:09:32 +0000 Subject: [PATCH 02/30] Update rbac to support watching endpointslices --- deployments/rbac/rbac.yaml | 8 ++++++++ internal/k8s/controller.go | 9 ++++++--- internal/k8s/utils.go | 1 + 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/deployments/rbac/rbac.yaml b/deployments/rbac/rbac.yaml index 0501d9f04e..57ed7e9baf 100644 --- a/deployments/rbac/rbac.yaml +++ b/deployments/rbac/rbac.yaml @@ -3,6 +3,14 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: nginx-ingress rules: +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch - apiGroups: - "" resources: diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 3174b1b505..7117b36d54 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3572,6 +3572,7 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ } func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlice(endpointSlice discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { + /// Move this code to a function called 'validateTargetPort` var targetPort int32 var err error @@ -3586,11 +3587,13 @@ func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlice(endpoint } if targetPort == 0 { - return nil, fmt.Errorf("no port %v in svc %s", backendPort, svc.Name) + return nil, fmt.Errorf("no port %v in service %s", backendPort, svc.Name) } + /// Move this code to a function called 'validateTargetPort` - // Check if the port referenced in the EndpointSlice is the same as - // the target port + // Endpoints may be duplicated across multiple EndpointSlices. + // Using a set to prevent returning duplicate endpoints. 
+ //endpointSet := make(map[podEndpoint]struct{}) for _, endpointSlicePort := range endpointSlice.Ports { if *endpointSlicePort.Port == targetPort { fmt.Println("Nice!") diff --git a/internal/k8s/utils.go b/internal/k8s/utils.go index c1c4ef4e97..cfcc6a5d23 100644 --- a/internal/k8s/utils.go +++ b/internal/k8s/utils.go @@ -127,6 +127,7 @@ func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) ( ep = *epStore.(*discovery_v1.EndpointSlice) if svc.Name == ep.Labels["kubernetes.io/service-name"] && svc.Namespace == ep.Namespace { return ep, nil + // Return a list of endpointslices there can be more than one per service. } } return ep, fmt.Errorf("could not find endpoints for service: %v", svc.Name) From 3c03f54c062527e09eb3286c11fd3a6356bf2157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Thu, 10 Nov 2022 10:35:02 +0000 Subject: [PATCH 03/30] Add check for multiple EndpointSlices per service. --- internal/k8s/controller.go | 6 +++--- internal/k8s/utils.go | 12 +++++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 7117b36d54..2ae3bc99aa 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3530,7 +3530,7 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ // New code // Get Endpoints from discovery_v1.EndpointSlice - var endpointSlices discovery_v1.EndpointSlice + var endpointSlices []discovery_v1.EndpointSlice for _, el := range lbc.endpointSliceLister { endpointSlices, err = el.GetServiceEndpointSlices(svc) if err == nil { @@ -3554,8 +3554,8 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ //New code //Check if our endpointslices object has endpoints in it. 
- if len(endpointSlices.Endpoints) >= 1 { - result, err = lbc.getEndpointsForPortFromEndpointSlice(endpointSlices, backend.Service.Port, svc) + for _, endpointSlice := range endpointSlices { + result, err = lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backend.Service.Port, svc) if err != nil { return nil, false, err diff --git a/internal/k8s/utils.go b/internal/k8s/utils.go index cfcc6a5d23..d7503d839c 100644 --- a/internal/k8s/utils.go +++ b/internal/k8s/utils.go @@ -110,7 +110,7 @@ func (s *storeToEndpointLister) GetServiceEndpoints(svc *v1.Service) (ep v1.Endp } // GetServiceEndpoints returns the endpoints of a service, matched on service name. -func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) (ep discovery_v1.EndpointSlice, err error) { +func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) (endpointSlices []discovery_v1.EndpointSlice, err error) { // -- Code to be used when Indexer is set up //svcNameKey := fmt.Sprintf("%s/%s", svc.Namespace, svc.Name) @@ -122,15 +122,17 @@ func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) ( //if err != nil { // //} - for _, epStore := range s.Store.List() { - ep = *epStore.(*discovery_v1.EndpointSlice) + ep := *epStore.(*discovery_v1.EndpointSlice) if svc.Name == ep.Labels["kubernetes.io/service-name"] && svc.Namespace == ep.Namespace { - return ep, nil + endpointSlices = append(endpointSlices, ep) // Return a list of endpointslices there can be more than one per service. } } - return ep, fmt.Errorf("could not find endpoints for service: %v", svc.Name) + if len(endpointSlices) > 0 { + return endpointSlices, nil + } + return endpointSlices, fmt.Errorf("could not find endpoints for service: %v", svc.Name) } // findPort locates the container port for the given pod and portName. 
If the From 55ed20d036aa1c07858760e5daacde7fe332e383 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Thu, 10 Nov 2022 14:49:36 +0000 Subject: [PATCH 04/30] Return all endpoints for multiple endpointslices per service --- internal/k8s/controller.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 2ae3bc99aa..0fb0673c0a 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3528,7 +3528,6 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ } } - // New code // Get Endpoints from discovery_v1.EndpointSlice var endpointSlices []discovery_v1.EndpointSlice for _, el := range lbc.endpointSliceLister { @@ -3552,13 +3551,14 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ return nil, false, err } - //New code - //Check if our endpointslices object has endpoints in it. - for _, endpointSlice := range endpointSlices { - result, err = lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backend.Service.Port, svc) - - if err != nil { - return nil, false, err + //Check if our EndpointSlices object has endpoints in it. + if len(endpointSlices) > 0 { + for _, endpointSlice := range endpointSlices { + podEndpoints, err := lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backend.Service.Port, svc) + result = append(result, podEndpoints...) 
+ if err != nil { + return nil, false, err + } } return result, false, nil } From cf4bd2255da4bfd21758e92cf8c00348cc24e9f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Thu, 10 Nov 2022 16:35:47 +0000 Subject: [PATCH 05/30] Add initial unit test --- internal/k8s/controller.go | 39 ++++++++------- internal/k8s/controller_test.go | 85 +++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+), 20 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 0fb0673c0a..e0e23d689a 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3537,8 +3537,6 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ } } - glog.V(3).Infof("Found EndpointSlices %v", endpointSlices) - if err != nil { if svc.Spec.Type == api_v1.ServiceTypeExternalName { if !lbc.isNginxPlus { @@ -3554,10 +3552,10 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ //Check if our EndpointSlices object has endpoints in it. if len(endpointSlices) > 0 { for _, endpointSlice := range endpointSlices { - podEndpoints, err := lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backend.Service.Port, svc) + podEndpoints, podEndpointsErr := lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backend.Service.Port, svc) result = append(result, podEndpoints...) 
- if err != nil { - return nil, false, err + if podEndpointsErr != nil { + return nil, false, podEndpointsErr } } return result, false, nil @@ -3571,33 +3569,30 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ return result, false, nil } -func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlice(endpointSlice discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { - /// Move this code to a function called 'validateTargetPort` - var targetPort int32 - var err error - +func (lbc *LoadBalancerController) validateTargetPort(backendPort networking.ServiceBackendPort, svc *api_v1.Service) (targetPort int32, err error) { for _, port := range svc.Spec.Ports { if (backendPort.Name == "" && port.Port == backendPort.Number) || port.Name == backendPort.Name { targetPort, err = lbc.getTargetPort(port, svc) if err != nil { - return nil, fmt.Errorf("error determining target port for port %v in Ingress: %w", backendPort, err) + return 0, fmt.Errorf("error determining target port for port %v in Ingress: %w", backendPort, err) } break } } - if targetPort == 0 { + return targetPort, nil +} + +func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlice(endpointSlice discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { + targetPort, err := lbc.validateTargetPort(backendPort, svc) + if err != nil { return nil, fmt.Errorf("no port %v in service %s", backendPort, svc.Name) } - /// Move this code to a function called 'validateTargetPort` - - // Endpoints may be duplicated across multiple EndpointSlices. - // Using a set to prevent returning duplicate endpoints. 
- //endpointSet := make(map[podEndpoint]struct{}) for _, endpointSlicePort := range endpointSlice.Ports { if *endpointSlicePort.Port == targetPort { - fmt.Println("Nice!") - var endpoints []podEndpoint + // Endpoints may be duplicated across multiple EndpointSlices. + // Using a set to prevent returning duplicate endpoints. + endpointSet := make(map[podEndpoint]struct{}) for _, endpoint := range endpointSlice.Endpoints { for _, endpointAddress := range endpoint.Addresses { address := ipv6SafeAddrPort(endpointAddress, *endpointSlicePort.Port) @@ -3611,9 +3606,13 @@ func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlice(endpoint podEndpoint.OwnerName = parentName podEndpoint.PodName = endpoint.TargetRef.Name } - endpoints = append(endpoints, podEndpoint) + endpointSet[podEndpoint] = struct{}{} } } + endpoints := make([]podEndpoint, 0, len(endpointSet)) + for ep := range endpointSet { + endpoints = append(endpoints, ep) + } return endpoints, nil } } diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 6034380aa1..d0e03ed83c 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -3,6 +3,7 @@ package k8s import ( "errors" "fmt" + discovery_v1 "k8s.io/api/discovery/v1" "reflect" "sort" "strings" @@ -470,6 +471,90 @@ func TestFormatWarningsMessages(t *testing.T) { } } +func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { + + lbc := LoadBalancerController{ + isNginxPlus: true, + } + + backendServicePort := networking.ServiceBackendPort{ + Number: 8080, + } + + tests := []struct { + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + expectedEps []podEndpoint + }{ + { + desc: "duplicate endpoints in an endpointslice", + targetPort: 8080, + expectedEps: []podEndpoint{ + { + Address: "1.2.3.4:8080", + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: "deployment", + OwnerName: "deploy-1", + }, + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ 
+ { + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + { + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + }, + }, + } + + svc := api_v1.Service{ + TypeMeta: meta_v1.TypeMeta{}, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "coffee-svc", + Namespace: "default", + }, + Spec: api_v1.ServiceSpec{ + Ports: []api_v1.ServicePort{ + { + Name: "foo", + Port: 80, + TargetPort: intstr.FromInt(8080), + }, + }, + }, + Status: api_v1.ServiceStatus{}, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + //gotEndps := getEndpointsBySubselectedPods(test.targetPort, pods, svcEps) + for _, endpointSlice := range test.svcEndpointSlices { + _, _ = lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backendServicePort, &svc) + } + + //if !reflect.DeepEqual(gotEndps, test.expectedEps) { + // t.Errorf("getEndpointsBySubselectedPods() = %v, want %v", gotEndps, test.expectedEps) + //} + }) + } + +} + func TestGetEndpointsBySubselectedPods(t *testing.T) { boolPointer := func(b bool) *bool { return &b } tests := []struct { From 3e4032d3f4164ea3b5635ec00eba506800dceeb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Fri, 11 Nov 2022 11:02:15 +0000 Subject: [PATCH 06/30] Add test for duplicate endpoints across two endpointslices --- internal/k8s/controller.go | 77 ++++++++++++--------- internal/k8s/controller_test.go | 118 +++++++++++++++++++++++++++----- 2 files changed, 145 insertions(+), 50 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index e0e23d689a..097e880e77 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3550,15 +3550,23 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ } //Check if our EndpointSlices object has endpoints in it. 
+ //if len(endpointSlices) > 0 { + // for _, endpointSlice := range endpointSlices { + // podEndpoints, podEndpointsErr := lbc.getEndpointsForPortFromEndpointSlices(endpointSlice, backend.Service.Port, svc) + // result = append(result, podEndpoints...) + // if podEndpointsErr != nil { + // return nil, false, podEndpointsErr + // } + // } + // return result, false, nil + //} + if len(endpointSlices) > 0 { - for _, endpointSlice := range endpointSlices { - podEndpoints, podEndpointsErr := lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backend.Service.Port, svc) - result = append(result, podEndpoints...) - if podEndpointsErr != nil { - return nil, false, podEndpointsErr - } + podEndpoints, podEndpointsErr := lbc.getEndpointsForPortFromEndpointSlices(endpointSlices, backend.Service.Port, svc) + if podEndpointsErr != nil { + return nil, false, podEndpointsErr } - return result, false, nil + return podEndpoints, false, nil } result, err = lbc.getEndpointsForPort(endps, backend.Service.Port, svc) @@ -3583,41 +3591,44 @@ func (lbc *LoadBalancerController) validateTargetPort(backendPort networking.Ser return targetPort, nil } -func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlice(endpointSlice discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { +func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlices(endpointSlices []discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { targetPort, err := lbc.validateTargetPort(backendPort, svc) if err != nil { return nil, fmt.Errorf("no port %v in service %s", backendPort, svc.Name) } - for _, endpointSlicePort := range endpointSlice.Ports { - if *endpointSlicePort.Port == targetPort { - // Endpoints may be duplicated across multiple EndpointSlices. - // Using a set to prevent returning duplicate endpoints. 
- endpointSet := make(map[podEndpoint]struct{}) - for _, endpoint := range endpointSlice.Endpoints { - for _, endpointAddress := range endpoint.Addresses { - address := ipv6SafeAddrPort(endpointAddress, *endpointSlicePort.Port) - podEndpoint := podEndpoint{ - Address: address, - } - if endpoint.TargetRef != nil { - parentType, parentName := - lbc.getPodOwnerTypeAndNameFromAddress(endpoint.TargetRef.Namespace, endpoint.TargetRef.Name) - podEndpoint.OwnerType = parentType - podEndpoint.OwnerName = parentName - podEndpoint.PodName = endpoint.TargetRef.Name + endpointSet := make(map[podEndpoint]struct{}) + for _, endpointSlice := range endpointSlices { + for _, endpointSlicePort := range endpointSlice.Ports { + if *endpointSlicePort.Port == targetPort { + // Endpoints may be duplicated across multiple EndpointSlices. + // Using a set to prevent returning duplicate endpoints. + for _, endpoint := range endpointSlice.Endpoints { + for _, endpointAddress := range endpoint.Addresses { + address := ipv6SafeAddrPort(endpointAddress, *endpointSlicePort.Port) + podEndpoint := podEndpoint{ + Address: address, + } + if endpoint.TargetRef != nil { + parentType, parentName := + lbc.getPodOwnerTypeAndNameFromAddress(endpoint.TargetRef.Namespace, endpoint.TargetRef.Name) + podEndpoint.OwnerType = parentType + podEndpoint.OwnerName = parentName + podEndpoint.PodName = endpoint.TargetRef.Name + } + endpointSet[podEndpoint] = struct{}{} } - endpointSet[podEndpoint] = struct{}{} } } - endpoints := make([]podEndpoint, 0, len(endpointSet)) - for ep := range endpointSet { - endpoints = append(endpoints, ep) - } - return endpoints, nil } } - - return nil, fmt.Errorf("no endpoints for target port %v in service %s", targetPort, svc.Name) + if len(endpointSet) == 0 { + return nil, fmt.Errorf("no endpoints for target port %v in service %s", targetPort, svc.Name) + } + endpoints := make([]podEndpoint, 0, len(endpointSet)) + for ep := range endpointSet { + endpoints = append(endpoints, ep) + } + 
return endpoints, nil } func (lbc *LoadBalancerController) getEndpointsForPort(endps api_v1.Endpoints, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index d0e03ed83c..d45549c894 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -472,6 +472,8 @@ func TestFormatWarningsMessages(t *testing.T) { } func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { + var endpointPort int32 + endpointPort = 8080 lbc := LoadBalancerController{ isNginxPlus: true, @@ -479,43 +481,129 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { backendServicePort := networking.ServiceBackendPort{ Number: 8080, + Name: "foo", } tests := []struct { - desc string - targetPort int32 - svcEndpointSlices []discovery_v1.EndpointSlice - expectedEps []podEndpoint + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + expectedEndpointSlices []podEndpoint }{ { desc: "duplicate endpoints in an endpointslice", targetPort: 8080, - expectedEps: []podEndpoint{ + expectedEndpointSlices: []podEndpoint{ { Address: "1.2.3.4:8080", - MeshPodOwner: configs.MeshPodOwner{ - OwnerType: "deployment", - OwnerName: "deploy-1", + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + }, + }, + { + desc: "two different endpoints in one endpoint slice", + targetPort: 8080, + expectedEndpointSlices: []podEndpoint{ + { + Address: "1.2.3.4:8080", + }, + { + Address: "5.6.7.8:8080", + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + 
"1.2.3.4", + }, + }, + { + Addresses: []string{ + "5.6.7.8", + }, + }, }, }, }, + }, + { + desc: "duplicate endpoints across two endpointslices", + targetPort: 8080, + expectedEndpointSlices: []podEndpoint{ + { + Address: "1.2.3.4:8080", + }, + { + Address: "5.6.7.8:8080", + }, + { + Address: "10.0.0.1:8080", + }, + }, svcEndpointSlices: []discovery_v1.EndpointSlice{ { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, Endpoints: []discovery_v1.Endpoint{ { Addresses: []string{ "1.2.3.4", }, }, + { + Addresses: []string{ + "5.6.7.8", + }, + }, }, }, { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, Endpoints: []discovery_v1.Endpoint{ { Addresses: []string{ "1.2.3.4", }, }, + { + Addresses: []string{ + "10.0.0.1", + }, + }, }, }, }, @@ -539,20 +627,16 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { }, Status: api_v1.ServiceStatus{}, } - for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - //gotEndps := getEndpointsBySubselectedPods(test.targetPort, pods, svcEps) - for _, endpointSlice := range test.svcEndpointSlices { - _, _ = lbc.getEndpointsForPortFromEndpointSlice(endpointSlice, backendServicePort, &svc) - } + gotEndpoints, _ := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &svc) - //if !reflect.DeepEqual(gotEndps, test.expectedEps) { - // t.Errorf("getEndpointsBySubselectedPods() = %v, want %v", gotEndps, test.expectedEps) - //} + if !reflect.DeepEqual(gotEndpoints, test.expectedEndpointSlices) { + t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", + gotEndpoints, test.expectedEndpointSlices) + } }) } - } func TestGetEndpointsBySubselectedPods(t *testing.T) { From 0d00a95193253c9ceca81942bd23530b96e51a44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Fri, 11 Nov 2022 14:12:54 +0000 Subject: [PATCH 07/30] Remove commented out code --- internal/k8s/controller.go | 
12 ------------ 1 file changed, 12 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 097e880e77..51c3f038f3 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3549,18 +3549,6 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ return nil, false, err } - //Check if our EndpointSlices object has endpoints in it. - //if len(endpointSlices) > 0 { - // for _, endpointSlice := range endpointSlices { - // podEndpoints, podEndpointsErr := lbc.getEndpointsForPortFromEndpointSlices(endpointSlice, backend.Service.Port, svc) - // result = append(result, podEndpoints...) - // if podEndpointsErr != nil { - // return nil, false, podEndpointsErr - // } - // } - // return result, false, nil - //} - if len(endpointSlices) > 0 { podEndpoints, podEndpointsErr := lbc.getEndpointsForPortFromEndpointSlices(endpointSlices, backend.Service.Port, svc) if podEndpointsErr != nil { From 5e8a405ede7183a834eeef1eb9e69cc5b684aa43 Mon Sep 17 00:00:00 2001 From: Ciara Stacke <18287516+ciarams87@users.noreply.github.com> Date: Thu, 10 Nov 2022 15:12:29 +0000 Subject: [PATCH 08/30] Create separate type for namespaced watchers --- internal/k8s/controller.go | 669 ++++++++++++++------------------ internal/k8s/controller_test.go | 18 +- internal/k8s/status.go | 96 ++--- internal/k8s/status_test.go | 37 +- 4 files changed, 371 insertions(+), 449 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 64822d1d62..1e2dc1f01a 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -105,30 +105,12 @@ type LoadBalancerController struct { dynClient dynamic.Interface restConfig *rest.Config cacheSyncs []cache.InformerSynced - sharedInformerFactory []informers.SharedInformerFactory - confSharedInformerFactory []k8s_nginx_informers.SharedInformerFactory - secretInformerFactory []informers.SharedInformerFactory + namespacedInformers map[string]*namespacedInformer 
configMapController cache.Controller - dynInformerFactory []dynamicinformer.DynamicSharedInformerFactory globalConfigurationController cache.Controller ingressLinkInformer cache.SharedIndexInformer - ingressLister []storeToIngressLister - svcLister []cache.Store - endpointLister []storeToEndpointLister configMapLister storeToConfigMapLister - podLister []indexerToPodLister - secretLister []cache.Store - virtualServerLister []cache.Store - virtualServerRouteLister []cache.Store - appProtectPolicyLister []cache.Store - appProtectLogConfLister []cache.Store - appProtectDosPolicyLister []cache.Store - appProtectDosLogConfLister []cache.Store - appProtectDosProtectedLister []cache.Store globalConfigurationLister cache.Store - appProtectUserSigLister []cache.Store - transportServerLister []cache.Store - policyLister []cache.Store ingressLinkLister cache.Store syncQueue *taskQueue ctx context.Context @@ -277,45 +259,12 @@ func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalanc glog.V(3).Infof("Nginx Ingress Controller has class: %v", input.IngressClass) + lbc.namespacedInformers = make(map[string]*namespacedInformer) for _, ns := range lbc.namespaceList { - lbc.sharedInformerFactory = append(lbc.sharedInformerFactory, informers.NewSharedInformerFactoryWithOptions(lbc.client, input.ResyncPeriod, informers.WithNamespace(ns))) + lbc.newNamespacedInformer(ns) } - // create handlers for resources we care about - lbc.addIngressHandler(createIngressHandlers(lbc)) - lbc.addServiceHandler(createServiceHandlers(lbc)) - lbc.addEndpointHandler(createEndpointHandlers(lbc)) - lbc.addPodHandler() - - secretsTweakListOptionsFunc := func(options *meta_v1.ListOptions) { - // Filter for helm release secrets. 
- helmSecretSelector := fields.OneTermNotEqualSelector(typeKeyword, helmReleaseType) - baseSelector, err := fields.ParseSelector(options.FieldSelector) - - if err != nil { - options.FieldSelector = helmSecretSelector.String() - } else { - options.FieldSelector = fields.AndSelectors(baseSelector, helmSecretSelector).String() - } - } - - // Creating a separate informer for secrets. - for _, ns := range lbc.secretNamespaceList { - lbc.secretInformerFactory = append(lbc.secretInformerFactory, informers.NewSharedInformerFactoryWithOptions(lbc.client, input.ResyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(secretsTweakListOptionsFunc))) - } - - lbc.addSecretHandler(createSecretHandlers(lbc)) - if lbc.areCustomResourcesEnabled { - for _, ns := range lbc.namespaceList { - lbc.confSharedInformerFactory = append(lbc.confSharedInformerFactory, k8s_nginx_informers.NewSharedInformerFactoryWithOptions(lbc.confClient, input.ResyncPeriod, k8s_nginx_informers.WithNamespace(ns))) - } - - lbc.addVirtualServerHandler(createVirtualServerHandlers(lbc)) - lbc.addVirtualServerRouteHandler(createVirtualServerRouteHandlers(lbc)) - lbc.addTransportServerHandler(createTransportServerHandlers(lbc)) - lbc.addPolicyHandler(createPolicyHandlers(lbc)) - if input.GlobalConfiguration != "" { lbc.watchGlobalConfiguration = true ns, name, _ := ParseNamespaceName(input.GlobalConfiguration) @@ -323,23 +272,6 @@ func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalanc } } - if lbc.appProtectEnabled || lbc.appProtectDosEnabled { - for _, ns := range lbc.namespaceList { - lbc.dynInformerFactory = append(lbc.dynInformerFactory, dynamicinformer.NewFilteredDynamicSharedInformerFactory(lbc.dynClient, 0, ns, nil)) - } - if lbc.appProtectEnabled { - lbc.addAppProtectPolicyHandler(createAppProtectPolicyHandlers(lbc)) - lbc.addAppProtectLogConfHandler(createAppProtectLogConfHandlers(lbc)) - lbc.addAppProtectUserSigHandler(createAppProtectUserSigHandlers(lbc)) - } - - 
if lbc.appProtectDosEnabled { - lbc.addAppProtectDosPolicyHandler(createAppProtectDosPolicyHandlers(lbc)) - lbc.addAppProtectDosLogConfHandler(createAppProtectDosLogConfHandlers(lbc)) - lbc.addAppProtectDosProtectedResourceHandler(createAppProtectDosProtectedResourceHandlers(lbc)) - } - } - if input.ConfigMaps != "" { nginxConfigMapsNS, nginxConfigMapsName, err := ParseNamespaceName(input.ConfigMaps) if err != nil { @@ -360,17 +292,13 @@ func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalanc } lbc.statusUpdater = &statusUpdater{ - client: input.KubeClient, - namespace: input.ControllerNamespace, - externalServiceName: input.ExternalServiceName, - ingressLister: lbc.ingressLister, - virtualServerLister: lbc.virtualServerLister, - virtualServerRouteLister: lbc.virtualServerRouteLister, - transportServerLister: lbc.transportServerLister, - policyLister: lbc.policyLister, - keyFunc: keyFunc, - confClient: input.ConfClient, - hasCorrectIngressClass: lbc.HasCorrectIngressClass, + client: input.KubeClient, + namespace: input.ControllerNamespace, + externalServiceName: input.ExternalServiceName, + namespacedInformers: lbc.namespacedInformers, + keyFunc: keyFunc, + confClient: input.ConfClient, + hasCorrectIngressClass: lbc.HasCorrectIngressClass, } lbc.configuration = NewConfiguration( @@ -396,6 +324,99 @@ func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalanc return lbc } +type namespacedInformer struct { + // namespace string + sharedInformerFactory informers.SharedInformerFactory + confSharedInformerFactory k8s_nginx_informers.SharedInformerFactory + secretInformerFactory informers.SharedInformerFactory + dynInformerFactory dynamicinformer.DynamicSharedInformerFactory + ingressLister storeToIngressLister + svcLister cache.Store + endpointLister storeToEndpointLister + podLister indexerToPodLister + secretLister cache.Store + virtualServerLister cache.Store + virtualServerRouteLister cache.Store + appProtectPolicyLister 
cache.Store + appProtectLogConfLister cache.Store + appProtectDosPolicyLister cache.Store + appProtectDosLogConfLister cache.Store + appProtectDosProtectedLister cache.Store + appProtectUserSigLister cache.Store + transportServerLister cache.Store + policyLister cache.Store + isSecretsEnabledNamespace bool + areCustomResourcesEnabled bool + appProtectEnabled bool + appProtectDosEnabled bool + stopCh <-chan struct{} +} + +func (lbc *LoadBalancerController) newNamespacedInformer(ns string) { + nsi := &namespacedInformer{} + nsi.sharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(lbc.client, lbc.resync, informers.WithNamespace(ns)) + + // create handlers for resources we care about + lbc.addIngressHandler(createIngressHandlers(lbc), nsi) + lbc.addServiceHandler(createServiceHandlers(lbc), nsi) + lbc.addEndpointHandler(createEndpointHandlers(lbc), nsi) + lbc.addPodHandler(nsi) + + secretsTweakListOptionsFunc := func(options *meta_v1.ListOptions) { + // Filter for helm release secrets. 
+ helmSecretSelector := fields.OneTermNotEqualSelector(typeKeyword, helmReleaseType) + baseSelector, err := fields.ParseSelector(options.FieldSelector) + + if err != nil { + options.FieldSelector = helmSecretSelector.String() + } else { + options.FieldSelector = fields.AndSelectors(baseSelector, helmSecretSelector).String() + } + } + + // Check if secrets informer should be created for this namespace + for _, v := range lbc.secretNamespaceList { + if v == ns { + nsi.isSecretsEnabledNamespace = true + nsi.secretInformerFactory = informers.NewSharedInformerFactoryWithOptions(lbc.client, lbc.resync, informers.WithNamespace(ns), informers.WithTweakListOptions(secretsTweakListOptionsFunc)) + lbc.addSecretHandler(createSecretHandlers(lbc), nsi) + break + } + } + + if lbc.areCustomResourcesEnabled { + nsi.areCustomResourcesEnabled = true + nsi.confSharedInformerFactory = k8s_nginx_informers.NewSharedInformerFactoryWithOptions(lbc.confClient, lbc.resync, k8s_nginx_informers.WithNamespace(ns)) + + lbc.addVirtualServerHandler(createVirtualServerHandlers(lbc), nsi) + lbc.addVirtualServerRouteHandler(createVirtualServerRouteHandlers(lbc), nsi) + lbc.addTransportServerHandler(createTransportServerHandlers(lbc), nsi) + lbc.addPolicyHandler(createPolicyHandlers(lbc), nsi) + + } + + if lbc.appProtectEnabled || lbc.appProtectDosEnabled { + for _, ns := range lbc.namespaceList { + nsi.dynInformerFactory = dynamicinformer.NewFilteredDynamicSharedInformerFactory(lbc.dynClient, 0, ns, nil) + } + if lbc.appProtectEnabled { + nsi.appProtectEnabled = true + lbc.addAppProtectPolicyHandler(createAppProtectPolicyHandlers(lbc), nsi) + lbc.addAppProtectLogConfHandler(createAppProtectLogConfHandlers(lbc), nsi) + lbc.addAppProtectUserSigHandler(createAppProtectUserSigHandlers(lbc), nsi) + } + + if lbc.appProtectDosEnabled { + nsi.appProtectDosEnabled = true + lbc.addAppProtectDosPolicyHandler(createAppProtectDosPolicyHandlers(lbc), nsi) + 
lbc.addAppProtectDosLogConfHandler(createAppProtectDosLogConfHandlers(lbc), nsi) + lbc.addAppProtectDosProtectedResourceHandler(createAppProtectDosProtectedResourceHandlers(lbc), nsi) + } + } + + lbc.namespacedInformers[ns] = nsi +} + // addLeaderHandler adds the handler for leader election to the controller func (lbc *LoadBalancerController) addLeaderHandler(leaderHandler leaderelection.LeaderCallbacks) { var err error @@ -411,115 +432,95 @@ func (lbc *LoadBalancerController) AddSyncQueue(item interface{}) { } // addAppProtectPolicyHandler creates dynamic informers for custom appprotect policy resource -func (lbc *LoadBalancerController) addAppProtectPolicyHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, dif := range lbc.dynInformerFactory { - informer := dif.ForResource(appprotect.PolicyGVR).Informer() - informer.AddEventHandler(handlers) - lbc.appProtectPolicyLister = append(lbc.appProtectPolicyLister, informer.GetStore()) +func (lbc *LoadBalancerController) addAppProtectPolicyHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.dynInformerFactory.ForResource(appprotect.PolicyGVR).Informer() + informer.AddEventHandler(handlers) + nsi.appProtectPolicyLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addAppProtectLogConfHandler creates dynamic informer for custom appprotect logging config resource -func (lbc *LoadBalancerController) addAppProtectLogConfHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, dif := range lbc.dynInformerFactory { - informer := dif.ForResource(appprotect.LogConfGVR).Informer() - informer.AddEventHandler(handlers) - lbc.appProtectLogConfLister = append(lbc.appProtectLogConfLister, informer.GetStore()) +func (lbc *LoadBalancerController) addAppProtectLogConfHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := 
nsi.dynInformerFactory.ForResource(appprotect.LogConfGVR).Informer() + informer.AddEventHandler(handlers) + nsi.appProtectLogConfLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addAppProtectUserSigHandler creates dynamic informer for custom appprotect user defined signature resource -func (lbc *LoadBalancerController) addAppProtectUserSigHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, dif := range lbc.dynInformerFactory { - informer := dif.ForResource(appprotect.UserSigGVR).Informer() - informer.AddEventHandler(handlers) - lbc.appProtectUserSigLister = append(lbc.appProtectUserSigLister, informer.GetStore()) +func (lbc *LoadBalancerController) addAppProtectUserSigHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.dynInformerFactory.ForResource(appprotect.UserSigGVR).Informer() + informer.AddEventHandler(handlers) + nsi.appProtectUserSigLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addAppProtectDosPolicyHandler creates dynamic informers for custom appprotectdos policy resource -func (lbc *LoadBalancerController) addAppProtectDosPolicyHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, dif := range lbc.dynInformerFactory { - informer := dif.ForResource(appprotectdos.DosPolicyGVR).Informer() - informer.AddEventHandler(handlers) - lbc.appProtectDosPolicyLister = append(lbc.appProtectDosPolicyLister, informer.GetStore()) +func (lbc *LoadBalancerController) addAppProtectDosPolicyHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.dynInformerFactory.ForResource(appprotectdos.DosPolicyGVR).Informer() + informer.AddEventHandler(handlers) + nsi.appProtectDosPolicyLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, 
informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addAppProtectDosLogConfHandler creates dynamic informer for custom appprotectdos logging config resource -func (lbc *LoadBalancerController) addAppProtectDosLogConfHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, dif := range lbc.dynInformerFactory { - informer := dif.ForResource(appprotectdos.DosLogConfGVR).Informer() - informer.AddEventHandler(handlers) - lbc.appProtectDosLogConfLister = append(lbc.appProtectDosLogConfLister, informer.GetStore()) +func (lbc *LoadBalancerController) addAppProtectDosLogConfHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.dynInformerFactory.ForResource(appprotectdos.DosLogConfGVR).Informer() + informer.AddEventHandler(handlers) + nsi.appProtectDosLogConfLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addAppProtectDosLogConfHandler creates dynamic informers for custom appprotectdos logging config resource -func (lbc *LoadBalancerController) addAppProtectDosProtectedResourceHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, cif := range lbc.confSharedInformerFactory { - informer := cif.Appprotectdos().V1beta1().DosProtectedResources().Informer() - informer.AddEventHandler(handlers) - lbc.appProtectDosProtectedLister = append(lbc.appProtectDosProtectedLister, informer.GetStore()) +func (lbc *LoadBalancerController) addAppProtectDosProtectedResourceHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.confSharedInformerFactory.Appprotectdos().V1beta1().DosProtectedResources().Informer() + informer.AddEventHandler(handlers) + nsi.appProtectDosProtectedLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addSecretHandler 
adds the handler for secrets to the controller -func (lbc *LoadBalancerController) addSecretHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, sif := range lbc.secretInformerFactory { - informer := sif.Core().V1().Secrets().Informer() - informer.AddEventHandler(handlers) - lbc.secretLister = append(lbc.secretLister, informer.GetStore()) +func (lbc *LoadBalancerController) addSecretHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.secretInformerFactory.Core().V1().Secrets().Informer() + informer.AddEventHandler(handlers) + nsi.secretLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addServiceHandler adds the handler for services to the controller -func (lbc *LoadBalancerController) addServiceHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, sif := range lbc.sharedInformerFactory { - informer := sif.Core().V1().Services().Informer() - informer.AddEventHandler(handlers) - lbc.svcLister = append(lbc.svcLister, informer.GetStore()) +func (lbc *LoadBalancerController) addServiceHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.sharedInformerFactory.Core().V1().Services().Informer() + informer.AddEventHandler(handlers) + nsi.svcLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addIngressHandler adds the handler for ingresses to the controller -func (lbc *LoadBalancerController) addIngressHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, sif := range lbc.sharedInformerFactory { - informer := sif.Networking().V1().Ingresses().Informer() - informer.AddEventHandler(handlers) - lbc.ingressLister = append(lbc.ingressLister, storeToIngressLister{Store: informer.GetStore()}) +func (lbc *LoadBalancerController) addIngressHandler(handlers 
cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.sharedInformerFactory.Networking().V1().Ingresses().Informer() + informer.AddEventHandler(handlers) + nsi.ingressLister = storeToIngressLister{Store: informer.GetStore()} - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addEndpointHandler adds the handler for endpoints to the controller -func (lbc *LoadBalancerController) addEndpointHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, sif := range lbc.sharedInformerFactory { - informer := sif.Core().V1().Endpoints().Informer() - informer.AddEventHandler(handlers) - var el storeToEndpointLister - el.Store = informer.GetStore() - lbc.endpointLister = append(lbc.endpointLister, el) +func (lbc *LoadBalancerController) addEndpointHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.sharedInformerFactory.Core().V1().Endpoints().Informer() + informer.AddEventHandler(handlers) + var el storeToEndpointLister + el.Store = informer.GetStore() + nsi.endpointLister = el - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } // addConfigMapHandler adds the handler for config maps to the controller @@ -537,43 +538,35 @@ func (lbc *LoadBalancerController) addConfigMapHandler(handlers cache.ResourceEv lbc.cacheSyncs = append(lbc.cacheSyncs, lbc.configMapController.HasSynced) } -func (lbc *LoadBalancerController) addPodHandler() { - for _, sif := range lbc.sharedInformerFactory { - informer := sif.Core().V1().Pods().Informer() - lbc.podLister = append(lbc.podLister, indexerToPodLister{Indexer: informer.GetIndexer()}) +func (lbc *LoadBalancerController) addPodHandler(nsi *namespacedInformer) { + informer := nsi.sharedInformerFactory.Core().V1().Pods().Informer() + nsi.podLister = indexerToPodLister{Indexer: informer.GetIndexer()} - lbc.cacheSyncs = 
append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } -func (lbc *LoadBalancerController) addVirtualServerHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, cif := range lbc.confSharedInformerFactory { - informer := cif.K8s().V1().VirtualServers().Informer() - informer.AddEventHandler(handlers) - lbc.virtualServerLister = append(lbc.virtualServerLister, informer.GetStore()) +func (lbc *LoadBalancerController) addVirtualServerHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.confSharedInformerFactory.K8s().V1().VirtualServers().Informer() + informer.AddEventHandler(handlers) + nsi.virtualServerLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } -func (lbc *LoadBalancerController) addVirtualServerRouteHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, cif := range lbc.confSharedInformerFactory { - informer := cif.K8s().V1().VirtualServerRoutes().Informer() - informer.AddEventHandler(handlers) - lbc.virtualServerRouteLister = append(lbc.virtualServerRouteLister, informer.GetStore()) +func (lbc *LoadBalancerController) addVirtualServerRouteHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.confSharedInformerFactory.K8s().V1().VirtualServerRoutes().Informer() + informer.AddEventHandler(handlers) + nsi.virtualServerRouteLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } -func (lbc *LoadBalancerController) addPolicyHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, cif := range lbc.confSharedInformerFactory { - informer := cif.K8s().V1().Policies().Informer() - informer.AddEventHandler(handlers) - lbc.policyLister = append(lbc.policyLister, informer.GetStore()) +func (lbc 
*LoadBalancerController) addPolicyHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.confSharedInformerFactory.K8s().V1().Policies().Informer() + informer.AddEventHandler(handlers) + nsi.policyLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } func (lbc *LoadBalancerController) addGlobalConfigurationHandler(handlers cache.ResourceEventHandlerFuncs, namespace string, name string) { @@ -590,14 +583,12 @@ func (lbc *LoadBalancerController) addGlobalConfigurationHandler(handlers cache. lbc.cacheSyncs = append(lbc.cacheSyncs, lbc.globalConfigurationController.HasSynced) } -func (lbc *LoadBalancerController) addTransportServerHandler(handlers cache.ResourceEventHandlerFuncs) { - for _, cif := range lbc.confSharedInformerFactory { - informer := cif.K8s().V1alpha1().TransportServers().Informer() - informer.AddEventHandler(handlers) - lbc.transportServerLister = append(lbc.transportServerLister, informer.GetStore()) +func (lbc *LoadBalancerController) addTransportServerHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { + informer := nsi.confSharedInformerFactory.K8s().V1alpha1().TransportServers().Informer() + informer.AddEventHandler(handlers) + nsi.transportServerLister = informer.GetStore() - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) - } + lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } func (lbc *LoadBalancerController) addIngressLinkHandler(handlers cache.ResourceEventHandlerFuncs, name string) { @@ -636,33 +627,21 @@ func (lbc *LoadBalancerController) Run() { go lbc.leaderElector.Run(lbc.ctx) } - for _, sif := range lbc.sharedInformerFactory { - go sif.Start(lbc.ctx.Done()) - } - - for _, secif := range lbc.secretInformerFactory { - go secif.Start(lbc.ctx.Done()) + for _, nif := range lbc.namespacedInformers { + nif.stopCh = lbc.ctx.Done() + nif.start() } if 
 lbc.watchNginxConfigMaps { go lbc.configMapController.Run(lbc.ctx.Done()) } - if lbc.areCustomResourcesEnabled { - for _, cif := range lbc.confSharedInformerFactory { - go cif.Start(lbc.ctx.Done()) - } - } + if lbc.watchGlobalConfiguration { go lbc.globalConfigurationController.Run(lbc.ctx.Done()) } if lbc.watchIngressLink { go lbc.ingressLinkInformer.Run(lbc.ctx.Done()) } - if lbc.appProtectEnabled || lbc.appProtectDosEnabled { - for _, dif := range lbc.dynInformerFactory { - go dif.Start(lbc.ctx.Done()) - } - } glog.V(3).Infof("Waiting for %d caches to sync", len(lbc.cacheSyncs)) @@ -678,13 +657,46 @@ func (lbc *LoadBalancerController) Run() { <-lbc.ctx.Done() } -// Stop shutdowns the load balancer controller +// Stop shuts down the load balancer controller func (lbc *LoadBalancerController) Stop() { lbc.cancel() - lbc.syncQueue.Shutdown() } +func (nsi *namespacedInformer) start() { + go nsi.sharedInformerFactory.Start(nsi.stopCh) + + if nsi.isSecretsEnabledNamespace { + go nsi.secretInformerFactory.Start(nsi.stopCh) + } + + if nsi.areCustomResourcesEnabled { + go nsi.confSharedInformerFactory.Start(nsi.stopCh) + } + + if nsi.appProtectEnabled || nsi.appProtectDosEnabled { + go nsi.dynInformerFactory.Start(nsi.stopCh) + } +} + +func (lbc *LoadBalancerController) getNamespacedInformer(ns string) *namespacedInformer { + var nsi *namespacedInformer + var isGlobalNs bool + var exists bool + + nsi, isGlobalNs = lbc.namespacedInformers[""] + + if !isGlobalNs { + // get the correct namespaced informers + nsi, exists = lbc.namespacedInformers[ns] + if !exists { + // we are not watching this namespace + return nil + } + } + return nsi +} + func (lbc *LoadBalancerController) syncEndpoints(task task) { key := task.Key var obj interface{} @@ -692,12 +704,8 @@ func (lbc *LoadBalancerController) syncEndpoints(task task) { var err error glog.V(3).Infof("Syncing endpoints %v", key) - for _, el := range lbc.endpointLister { - obj, endpExists, err = el.GetByKey(key) - if endpExists
{ - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, endpExists, err = lbc.getNamespacedInformer(ns).endpointLister.GetByKey(key) if err != nil { lbc.syncQueue.Requeue(task, err) @@ -857,8 +865,8 @@ func (lbc *LoadBalancerController) updateAllConfigs() { // As a result, the IC will generate configuration for that resource assuming that the Secret is missing and // it will report warnings. (See https://github.com/nginxinc/kubernetes-ingress/issues/1448 ) func (lbc *LoadBalancerController) preSyncSecrets() { - for _, sl := range lbc.secretLister { - objects := sl.List() + for _, ni := range lbc.namespacedInformers { + objects := ni.secretLister.List() glog.V(3).Infof("PreSync %d Secrets", len(objects)) for _, obj := range objects { @@ -1010,12 +1018,8 @@ func (lbc *LoadBalancerController) syncPolicy(task task) { var polExists bool var err error - for _, pl := range lbc.policyLister { - obj, polExists, err = pl.GetByKey(key) - if polExists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, polExists, err = lbc.getNamespacedInformer(ns).policyLister.GetByKey(key) if err != nil { lbc.syncQueue.Requeue(task, err) @@ -1073,12 +1077,8 @@ func (lbc *LoadBalancerController) syncTransportServer(task task) { var tsExists bool var err error - for _, tl := range lbc.transportServerLister { - obj, tsExists, err = tl.GetByKey(key) - if tsExists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, tsExists, err = lbc.getNamespacedInformer(ns).transportServerLister.GetByKey(key) if err != nil { lbc.syncQueue.Requeue(task, err) @@ -1156,12 +1156,8 @@ func (lbc *LoadBalancerController) syncVirtualServer(task task) { var vsExists bool var err error - for _, vl := range lbc.virtualServerLister { - obj, vsExists, err = vl.GetByKey(key) - if vsExists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, vsExists, err = lbc.getNamespacedInformer(ns).virtualServerLister.GetByKey(key) if err != nil { 
lbc.syncQueue.Requeue(task, err) @@ -1269,12 +1265,8 @@ func (lbc *LoadBalancerController) processChanges(changes []ResourceChange) { var vsExists bool var err error - for _, vl := range lbc.virtualServerLister { - _, vsExists, err = vl.GetByKey(key) - if vsExists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + _, vsExists, err = lbc.getNamespacedInformer(ns).virtualServerLister.GetByKey(key) if err != nil { glog.Errorf("Error when getting VirtualServer for %v: %v", key, err) @@ -1296,12 +1288,8 @@ func (lbc *LoadBalancerController) processChanges(changes []ResourceChange) { var ingExists bool var err error - for _, il := range lbc.ingressLister { - _, ingExists, err = il.GetByKeySafe(key) - if ingExists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + _, ingExists, err = lbc.getNamespacedInformer(ns).ingressLister.GetByKeySafe(key) if err != nil { glog.Errorf("Error when getting Ingress for %v: %v", key, err) @@ -1322,12 +1310,8 @@ func (lbc *LoadBalancerController) processChanges(changes []ResourceChange) { var tsExists bool var err error - for _, tl := range lbc.transportServerLister { - _, tsExists, err = tl.GetByKey(key) - if tsExists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + _, tsExists, err = lbc.getNamespacedInformer(ns).transportServerLister.GetByKey(key) if err != nil { glog.Errorf("Error when getting TransportServer for %v: %v", key, err) @@ -1916,12 +1900,8 @@ func (lbc *LoadBalancerController) syncVirtualServerRoute(task task) { var exists bool var err error - for _, vrl := range lbc.virtualServerRouteLister { - obj, exists, err = vrl.GetByKey(key) - if exists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, exists, err = lbc.getNamespacedInformer(ns).virtualServerRouteLister.GetByKey(key) if err != nil { lbc.syncQueue.Requeue(task, err) @@ -1952,12 +1932,9 @@ func (lbc *LoadBalancerController) syncIngress(task task) { var ingExists bool var err error - for _, il := 
range lbc.ingressLister { - ing, ingExists, err = il.GetByKeySafe(key) - if ingExists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + ing, ingExists, err = lbc.getNamespacedInformer(ns).ingressLister.GetByKeySafe(key) + if err != nil { lbc.syncQueue.Requeue(task, err) return @@ -2010,12 +1987,8 @@ func (lbc *LoadBalancerController) syncService(task task) { var exists bool var err error - for _, sl := range lbc.svcLister { - obj, exists, err = sl.GetByKey(key) - if exists { - break - } - } + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, exists, err = lbc.getNamespacedInformer(ns).svcLister.GetByKey(key) if err != nil { lbc.syncQueue.Requeue(task, err) @@ -2116,20 +2089,15 @@ func (lbc *LoadBalancerController) syncSecret(task task) { var secrExists bool var err error - for _, sl := range lbc.secretLister { - obj, secrExists, err = sl.GetByKey(key) - if secrExists { - break - } - } + namespace, name, err := ParseNamespaceName(key) if err != nil { - lbc.syncQueue.Requeue(task, err) + glog.Warningf("Secret key %v is invalid: %v", key, err) return } + obj, secrExists, err = lbc.getNamespacedInformer(namespace).secretLister.GetByKey(key) - namespace, name, err := ParseNamespaceName(key) if err != nil { - glog.Warningf("Secret key %v is invalid: %v", key, err) + lbc.syncQueue.Requeue(task, err) return } @@ -2261,8 +2229,8 @@ func getStatusFromEventTitle(eventTitle string) string { func (lbc *LoadBalancerController) updateVirtualServersStatusFromEvents() error { var allErrs []error - for _, vl := range lbc.virtualServerLister { - for _, obj := range vl.List() { + for _, nsi := range lbc.namespacedInformers { + for _, obj := range nsi.virtualServerLister.List() { vs := obj.(*conf_v1.VirtualServer) if !lbc.HasCorrectIngressClass(vs) { @@ -2305,8 +2273,8 @@ func (lbc *LoadBalancerController) updateVirtualServersStatusFromEvents() error func (lbc *LoadBalancerController) updateVirtualServerRoutesStatusFromEvents() error { var allErrs []error - for _, 
vsrl := range lbc.virtualServerRouteLister { - for _, obj := range vsrl.List() { + for _, nsi := range lbc.namespacedInformers { + for _, obj := range nsi.virtualServerRouteLister.List() { vsr := obj.(*conf_v1.VirtualServerRoute) if !lbc.HasCorrectIngressClass(vsr) { @@ -2349,8 +2317,8 @@ func (lbc *LoadBalancerController) updateVirtualServerRoutesStatusFromEvents() e func (lbc *LoadBalancerController) updatePoliciesStatus() error { var allErrs []error - for _, pl := range lbc.policyLister { - for _, obj := range pl.List() { + for _, nsi := range lbc.namespacedInformers { + for _, obj := range nsi.policyLister.List() { pol := obj.(*conf_v1.Policy) err := validation.ValidatePolicy(pol, lbc.isNginxPlus, lbc.enableOIDC, lbc.appProtectEnabled) @@ -2379,8 +2347,8 @@ func (lbc *LoadBalancerController) updatePoliciesStatus() error { func (lbc *LoadBalancerController) updateTransportServersStatusFromEvents() error { var allErrs []error - for _, tl := range lbc.transportServerLister { - for _, obj := range tl.List() { + for _, nsi := range lbc.namespacedInformers { + for _, obj := range nsi.transportServerLister.List() { ts := obj.(*conf_v1alpha1.TransportServer) events, err := lbc.client.CoreV1().Events(ts.Namespace).List(context.TODO(), @@ -2919,8 +2887,8 @@ func createPolicyMap(policies []*conf_v1.Policy) map[string]*conf_v1.Policy { func (lbc *LoadBalancerController) getAllPolicies() []*conf_v1.Policy { var policies []*conf_v1.Policy - for _, pl := range lbc.policyLister { - for _, obj := range pl.List() { + for _, nsi := range lbc.namespacedInformers { + for _, obj := range nsi.policyLister.List() { pol := obj.(*conf_v1.Policy) err := validation.ValidatePolicy(pol, lbc.isNginxPlus, lbc.enableOIDC, lbc.appProtectEnabled) @@ -2952,12 +2920,8 @@ func (lbc *LoadBalancerController) getPolicies(policies []conf_v1.PolicyReferenc var exists bool var err error - for _, pl := range lbc.policyLister { - policyObj, exists, err = pl.GetByKey(policyKey) - if exists { - break - } - } 
+ policyObj, exists, err = lbc.getNamespacedInformer(polNamespace).policyLister.GetByKey(policyKey) + if err != nil { errors = append(errors, fmt.Errorf("failed to get policy %s: %w", policyKey, err)) continue @@ -3306,23 +3270,15 @@ func (lbc *LoadBalancerController) getEndpointsForSubselector(namespace string, func (lbc *LoadBalancerController) getEndpointsForServiceWithSubselector(targetPort int32, subselector map[string]string, svc *api_v1.Service) (endps []podEndpoint, err error) { var pods []*api_v1.Pod - for _, pl := range lbc.podLister { - pods, err = pl.ListByNamespace(svc.Namespace, labels.Merge(svc.Spec.Selector, subselector).AsSelector()) - if len(pods) > 0 { - break - } - } + nsi := lbc.getNamespacedInformer(svc.Namespace) + pods, err = nsi.podLister.ListByNamespace(svc.Namespace, labels.Merge(svc.Spec.Selector, subselector).AsSelector()) + if err != nil { return nil, fmt.Errorf("error getting pods in namespace %v that match the selector %v: %w", svc.Namespace, labels.Merge(svc.Spec.Selector, subselector), err) } var svcEps api_v1.Endpoints - for _, el := range lbc.endpointLister { - svcEps, err = el.GetServiceEndpoints(svc) - if err == nil { - break - } - } + svcEps, err = nsi.endpointLister.GetServiceEndpoints(svc) if err != nil { glog.V(3).Infof("Error getting endpoints for service %s from the cache: %v", svc.Name, err) return nil, err @@ -3382,12 +3338,9 @@ func (lbc *LoadBalancerController) getHealthChecksForIngressBackend(backend *net return nil } var pods []*api_v1.Pod - for _, pl := range lbc.podLister { - pods, err = pl.ListByNamespace(svc.Namespace, labels.Set(svc.Spec.Selector).AsSelector()) - if len(pods) > 0 { - break - } - } + nsi := lbc.getNamespacedInformer(svc.Namespace) + pods, err = nsi.podLister.ListByNamespace(svc.Namespace, labels.Set(svc.Spec.Selector).AsSelector()) + if err != nil { glog.V(3).Infof("Error fetching pods for namespace %v: %v", svc.Namespace, err) return nil @@ -3439,12 +3392,8 @@ func (lbc *LoadBalancerController) 
getExternalEndpointsForIngressBackend(backend func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networking.IngressBackend, svc *api_v1.Service) (result []podEndpoint, isExternal bool, err error) { var endps api_v1.Endpoints - for _, el := range lbc.endpointLister { - endps, err = el.GetServiceEndpoints(svc) - if err == nil { - break - } - } + endps, err = lbc.getNamespacedInformer(svc.Namespace).endpointLister.GetServiceEndpoints(svc) + if err != nil { if svc.Spec.Type == api_v1.ServiceTypeExternalName { if !lbc.isNginxPlus { @@ -3512,12 +3461,9 @@ func (lbc *LoadBalancerController) getPodOwnerTypeAndNameFromAddress(ns, name st var obj interface{} var exists bool var err error - for _, pl := range lbc.podLister { - obj, exists, err = pl.GetByKey(fmt.Sprintf("%s/%s", ns, name)) - if exists { - break - } - } + + obj, exists, err = lbc.getNamespacedInformer(ns).podLister.GetByKey(fmt.Sprintf("%s/%s", ns, name)) + if err != nil { glog.Warningf("could not get pod by key %s/%s: %v", ns, name, err) return "", "" @@ -3565,12 +3511,8 @@ func (lbc *LoadBalancerController) getTargetPort(svcPort api_v1.ServicePort, svc var pods []*api_v1.Pod var err error - for _, pl := range lbc.podLister { - pods, err = pl.ListByNamespace(svc.Namespace, labels.Set(svc.Spec.Selector).AsSelector()) - if len(pods) > 0 { - break - } - } + pods, err = lbc.getNamespacedInformer(svc.Namespace).podLister.ListByNamespace(svc.Namespace, labels.Set(svc.Spec.Selector).AsSelector()) + if err != nil { return 0, fmt.Errorf("error getting pod information: %w", err) } @@ -3606,12 +3548,9 @@ func (lbc *LoadBalancerController) getServiceForIngressBackend(backend *networki var svcObj interface{} var svcExists bool var err error - for _, sl := range lbc.svcLister { - svcObj, svcExists, err = sl.GetByKey(svcKey) - if svcExists { - break - } - } + + svcObj, svcExists, err = lbc.getNamespacedInformer(namespace).svcLister.GetByKey(svcKey) + if err != nil { return nil, err } @@ -3684,12 
+3623,10 @@ func (lbc *LoadBalancerController) syncAppProtectPolicy(task task) { var obj interface{} var polExists bool var err error - for _, apl := range lbc.appProtectPolicyLister { - obj, polExists, err = apl.GetByKey(key) - if polExists { - break - } - } + + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, polExists, err = lbc.getNamespacedInformer(ns).appProtectPolicyLister.GetByKey(key) + if err != nil { lbc.syncQueue.Requeue(task, err) return @@ -3718,12 +3655,10 @@ func (lbc *LoadBalancerController) syncAppProtectLogConf(task task) { var obj interface{} var confExists bool var err error - for _, apl := range lbc.appProtectLogConfLister { - obj, confExists, err = apl.GetByKey(key) - if confExists { - break - } - } + + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, confExists, err = lbc.getNamespacedInformer(ns).appProtectLogConfLister.GetByKey(key) + if err != nil { lbc.syncQueue.Requeue(task, err) return @@ -3752,12 +3687,10 @@ func (lbc *LoadBalancerController) syncAppProtectUserSig(task task) { var obj interface{} var sigExists bool var err error - for _, apl := range lbc.appProtectUserSigLister { - obj, sigExists, err = apl.GetByKey(key) - if sigExists { - break - } - } + + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, sigExists, err = lbc.getNamespacedInformer(ns).appProtectUserSigLister.GetByKey(key) + if err != nil { lbc.syncQueue.Requeue(task, err) return @@ -3786,12 +3719,10 @@ func (lbc *LoadBalancerController) syncAppProtectDosPolicy(task task) { var obj interface{} var polExists bool var err error - for _, apl := range lbc.appProtectDosPolicyLister { - obj, polExists, err = apl.GetByKey(key) - if polExists { - break - } - } + + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, polExists, err = lbc.getNamespacedInformer(ns).appProtectDosPolicyLister.GetByKey(key) + if err != nil { lbc.syncQueue.Requeue(task, err) return @@ -3818,12 +3749,10 @@ func (lbc *LoadBalancerController) syncAppProtectDosLogConf(task task) { var obj 
interface{} var confExists bool var err error - for _, apl := range lbc.appProtectDosLogConfLister { - obj, confExists, err = apl.GetByKey(key) - if confExists { - break - } - } + + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, confExists, err = lbc.getNamespacedInformer(ns).appProtectDosLogConfLister.GetByKey(key) + if err != nil { lbc.syncQueue.Requeue(task, err) return @@ -3850,12 +3779,10 @@ func (lbc *LoadBalancerController) syncDosProtectedResource(task task) { var obj interface{} var confExists bool var err error - for _, apl := range lbc.appProtectDosProtectedLister { - obj, confExists, err = apl.GetByKey(key) - if confExists { - break - } - } + + ns, _, _ := cache.SplitMetaNamespaceKey(key) + obj, confExists, err = lbc.getNamespacedInformer(ns).appProtectDosProtectedLister.GetByKey(key) + if err != nil { lbc.syncQueue.Requeue(task, err) return diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 6034380aa1..3cf7c883d9 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -653,12 +653,12 @@ func TestGetPolicies(t *testing.T) { }, } - var pl []cache.Store - pl = append(pl, policyLister) + nsi := make(map[string]*namespacedInformer) + nsi[""] = &namespacedInformer{policyLister: policyLister} lbc := LoadBalancerController{ - isNginxPlus: true, - policyLister: pl, + isNginxPlus: true, + namespacedInformers: nsi, } policyRefs := []conf_v1.PolicyReference{ @@ -2297,13 +2297,13 @@ func TestPreSyncSecrets(t *testing.T) { } }, } - var sl []cache.Store - sl = append(sl, secretLister) + nsi := make(map[string]*namespacedInformer) + nsi[""] = &namespacedInformer{secretLister: secretLister} lbc := LoadBalancerController{ - isNginxPlus: true, - secretStore: secrets.NewEmptyFakeSecretsStore(), - secretLister: sl, + isNginxPlus: true, + secretStore: secrets.NewEmptyFakeSecretsStore(), + namespacedInformers: nsi, } lbc.preSyncSecrets() diff --git a/internal/k8s/status.go b/internal/k8s/status.go index 
672c8aeeb8..78b9a67235 100644 --- a/internal/k8s/status.go +++ b/internal/k8s/status.go @@ -38,11 +38,7 @@ type statusUpdater struct { status []api_v1.LoadBalancerIngress statusInitialized bool keyFunc func(obj interface{}) (string, error) - ingressLister []storeToIngressLister - virtualServerLister []cache.Store - virtualServerRouteLister []cache.Store - transportServerLister []cache.Store - policyLister []cache.Store + namespacedInformers map[string]*namespacedInformer confClient k8s_nginx.Interface hasCorrectIngressClass func(interface{}) bool } @@ -111,6 +107,24 @@ func (su *statusUpdater) UpdateIngressStatus(ing networking.Ingress) error { return su.updateIngressWithStatus(ing, su.status) } +func (su *statusUpdater) getNamespacedInformer(ns string) *namespacedInformer { + var nsi *namespacedInformer + var isGlobalNs bool + var exists bool + + nsi, isGlobalNs = su.namespacedInformers[""] + + if !isGlobalNs { + // get the correct namespaced informers + nsi, exists = su.namespacedInformers[ns] + if !exists { + // we are not watching this namespace + return nil + } + } + return nsi +} + // updateIngressWithStatus sets the provided status on the selected Ingress. 
func (su *statusUpdater) updateIngressWithStatus(ing networking.Ingress, status []api_v1.LoadBalancerIngress) error { // Get an up-to-date Ingress from the Store @@ -120,14 +134,11 @@ func (su *statusUpdater) updateIngressWithStatus(ing networking.Ingress, status return err } + ns, _, _ := cache.SplitMetaNamespaceKey(key) var ingCopy *networking.Ingress var exists bool - for _, il := range su.ingressLister { - ingCopy, exists, err = il.GetByKeySafe(key) - if exists { - break - } - } + ingCopy, exists, err = su.getNamespacedInformer(ns).ingressLister.GetByKeySafe(key) + if err != nil { glog.V(3).Infof("error getting ing from Store by key: %v", err) return err @@ -415,12 +426,9 @@ func (su *statusUpdater) UpdateTransportServerStatus(ts *conf_v1alpha1.Transport var tsLatest interface{} var exists bool var err error - for _, tl := range su.transportServerLister { - tsLatest, exists, err = tl.Get(ts) - if exists { - break - } - } + + tsLatest, exists, err = su.getNamespacedInformer(ts.Namespace).transportServerLister.Get(ts) + if err != nil { glog.V(3).Infof("error getting TransportServer from Store: %v", err) return err @@ -466,12 +474,9 @@ func (su *statusUpdater) UpdateVirtualServerStatus(vs *conf_v1.VirtualServer, st var vsLatest interface{} var exists bool var err error - for _, vl := range su.virtualServerLister { - vsLatest, exists, err = vl.Get(vs) - if exists { - break - } - } + + vsLatest, exists, err = su.getNamespacedInformer(vs.Namespace).virtualServerLister.Get(vs) + if err != nil { glog.V(3).Infof("error getting VirtualServer from Store: %v", err) return err @@ -532,12 +537,9 @@ func (su *statusUpdater) UpdateVirtualServerRouteStatusWithReferencedBy(vsr *con var vsrLatest interface{} var exists bool var err error - for _, vl := range su.virtualServerRouteLister { - vsrLatest, exists, err = vl.Get(vsr) - if exists { - break - } - } + + vsrLatest, exists, err = su.getNamespacedInformer(vsr.Namespace).virtualServerRouteLister.Get(vsr) + if err != nil { 
glog.V(3).Infof("error getting VirtualServerRoute from Store: %v", err) return err @@ -575,12 +577,9 @@ func (su *statusUpdater) UpdateVirtualServerRouteStatus(vsr *conf_v1.VirtualServ var vsrLatest interface{} var exists bool var err error - for _, vl := range su.virtualServerRouteLister { - vsrLatest, exists, err = vl.Get(vsr) - if exists { - break - } - } + + vsrLatest, exists, err = su.getNamespacedInformer(vsr.Namespace).virtualServerRouteLister.Get(vsr) + if err != nil { glog.V(3).Infof("error getting VirtualServerRoute from Store: %v", err) return err @@ -614,12 +613,9 @@ func (su *statusUpdater) updateVirtualServerExternalEndpoints(vs *conf_v1.Virtua var vsLatest interface{} var exists bool var err error - for _, vl := range su.virtualServerLister { - vsLatest, exists, err = vl.Get(vs) - if exists { - break - } - } + + vsLatest, exists, err = su.getNamespacedInformer(vs.Namespace).virtualServerLister.Get(vs) + if err != nil { glog.V(3).Infof("error getting VirtualServer from Store: %v", err) return err @@ -645,12 +641,9 @@ func (su *statusUpdater) updateVirtualServerRouteExternalEndpoints(vsr *conf_v1. 
var vsrLatest interface{} var exists bool var err error - for _, vl := range su.virtualServerRouteLister { - vsrLatest, exists, err = vl.Get(vsr) - if exists { - break - } - } + + vsrLatest, exists, err = su.getNamespacedInformer(vsr.Namespace).virtualServerRouteLister.Get(vsr) + if err != nil { glog.V(3).Infof("error getting VirtualServerRoute from Store: %v", err) return err @@ -696,12 +689,9 @@ func (su *statusUpdater) UpdatePolicyStatus(pol *conf_v1.Policy, state string, r var polLatest interface{} var exists bool var err error - for _, vl := range su.policyLister { - polLatest, exists, err = vl.Get(pol) - if exists { - break - } - } + + polLatest, exists, err = su.getNamespacedInformer(pol.Namespace).policyLister.Get(pol) + if err != nil { glog.V(3).Infof("error getting policy from Store: %v", err) return err diff --git a/internal/k8s/status_test.go b/internal/k8s/status_test.go index 141597ed0c..43572f680d 100644 --- a/internal/k8s/status_test.go +++ b/internal/k8s/status_test.go @@ -44,11 +44,12 @@ func TestUpdateTransportServerStatus(t *testing.T) { if err != nil { t.Errorf("Error adding TransportServer to the transportserver lister: %v", err) } - tsl := []cache.Store{tsLister} + nsi := make(map[string]*namespacedInformer) + nsi["default"] = &namespacedInformer{transportServerLister: tsLister} su := statusUpdater{ - transportServerLister: tsl, - confClient: fakeClient, - keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, + namespacedInformers: nsi, + confClient: fakeClient, + keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, } err = su.UpdateTransportServerStatus(ts, "after status", "after reason", "after message") @@ -104,11 +105,12 @@ func TestUpdateTransportServerStatusIgnoreNoChange(t *testing.T) { if err != nil { t.Errorf("Error adding TransportServer to the transportserver lister: %v", err) } - tsl := []cache.Store{tsLister} + nsi := make(map[string]*namespacedInformer) + nsi["default"] = &namespacedInformer{transportServerLister: tsLister} su := 
statusUpdater{ - transportServerLister: tsl, - confClient: fakeClient, - keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, + namespacedInformers: nsi, + confClient: fakeClient, + keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, } err = su.UpdateTransportServerStatus(ts, "same status", "same reason", "same message") @@ -158,12 +160,13 @@ func TestUpdateTransportServerStatusMissingTransportServer(t *testing.T) { nil, ) - tsl := []cache.Store{tsLister} + nsi := make(map[string]*namespacedInformer) + nsi[""] = &namespacedInformer{transportServerLister: tsLister} su := statusUpdater{ - transportServerLister: tsl, - confClient: fakeClient, - keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, + namespacedInformers: nsi, + confClient: fakeClient, + keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, externalEndpoints: []conf_v1.ExternalEndpoint{ { IP: "123.123.123.123", @@ -214,14 +217,15 @@ func TestStatusUpdateWithExternalStatusAndExternalService(t *testing.T) { t.Errorf("Error adding Ingress to the ingress lister: %v", err) } - isl := []storeToIngressLister{ingLister} + nsi := make(map[string]*namespacedInformer) + nsi[""] = &namespacedInformer{ingressLister: ingLister} su := statusUpdater{ client: fakeClient, namespace: "namespace", externalServiceName: "service-name", externalStatusAddress: "123.123.123.123", - ingressLister: isl, + namespacedInformers: nsi, keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, } err = su.ClearIngressStatus(ing) @@ -319,13 +323,14 @@ func TestStatusUpdateWithExternalStatusAndIngressLink(t *testing.T) { t.Errorf("Error adding Ingress to the ingress lister: %v", err) } - isl := []storeToIngressLister{ingLister} + nsi := make(map[string]*namespacedInformer) + nsi[""] = &namespacedInformer{ingressLister: ingLister} su := statusUpdater{ client: fakeClient, namespace: "namespace", externalStatusAddress: "", - ingressLister: isl, + namespacedInformers: nsi, keyFunc: cache.DeletionHandlingMetaNamespaceKeyFunc, } From 
67c063afeadf210715e46d9408ab6b1531178a5b Mon Sep 17 00:00:00 2001 From: Ciara Stacke <18287516+ciarams87@users.noreply.github.com> Date: Thu, 10 Nov 2022 15:12:52 +0000 Subject: [PATCH 09/30] Create separate type for namespaced watchers in certmanager controller --- internal/certmanager/cm_controller.go | 116 +++++++++++---------- internal/certmanager/cm_controller_test.go | 27 +++-- internal/certmanager/helper.go | 18 ++++ internal/certmanager/sync.go | 24 ++--- internal/certmanager/sync_test.go | 15 ++- 5 files changed, 118 insertions(+), 82 deletions(-) diff --git a/internal/certmanager/cm_controller.go b/internal/certmanager/cm_controller.go index 2f0f78da57..03a6876b1c 100644 --- a/internal/certmanager/cm_controller.go +++ b/internal/certmanager/cm_controller.go @@ -56,16 +56,15 @@ const ( // and creates/ updates certificates for VS resources as required, // and VS resources when certificate objects are created/ updated type CmController struct { - vsLister []listers_v1.VirtualServerLister - sync SyncFn - ctx context.Context - mustSync []cache.InformerSynced - queue workqueue.RateLimitingInterface - vsSharedInformerFactory []vsinformers.SharedInformerFactory - cmSharedInformerFactory []cm_informers.SharedInformerFactory - kubeSharedInformerFactory []kubeinformers.SharedInformerFactory - recorder record.EventRecorder - cmClient *cm_clientset.Clientset + sync SyncFn + ctx context.Context + mustSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + informerGroup map[string]*namespacedInformer + recorder record.EventRecorder + cmClient *cm_clientset.Clientset + kubeClient kubernetes.Interface + vsClient k8s_nginx.Interface } // CmOpts is the options required for building the CmController @@ -78,27 +77,42 @@ type CmOpts struct { vsClient k8s_nginx.Interface } +type namespacedInformer struct { + vsSharedInformerFactory vsinformers.SharedInformerFactory + cmSharedInformerFactory cm_informers.SharedInformerFactory + kubeSharedInformerFactory 
kubeinformers.SharedInformerFactory + vsLister listers_v1.VirtualServerLister + cmLister cmlisters.CertificateLister +} + func (c *CmController) register() workqueue.RateLimitingInterface { - var cmLister []cmlisters.CertificateLister - for _, sif := range c.vsSharedInformerFactory { - c.vsLister = append(c.vsLister, sif.K8s().V1().VirtualServers().Lister()) - sif.K8s().V1().VirtualServers().Informer().AddEventHandler(&controllerpkg.QueuingEventHandler{ - Queue: c.queue, - }) - c.mustSync = append(c.mustSync, sif.K8s().V1().VirtualServers().Informer().HasSynced) - } + c.sync = SyncFnFor(c.recorder, c.cmClient, c.informerGroup) + return c.queue +} - for _, cif := range c.cmSharedInformerFactory { - cif.Certmanager().V1().Certificates().Informer().AddEventHandler(&controllerpkg.BlockingEventHandler{ - WorkFunc: certificateHandler(c.queue), - }) - cmLister = append(cmLister, cif.Certmanager().V1().Certificates().Lister()) - c.mustSync = append(c.mustSync, cif.Certmanager().V1().Certificates().Informer().HasSynced) - } +func (c *CmController) newNamespacedInformer(ns string) { + nsi := &namespacedInformer{} + nsi.cmSharedInformerFactory = cm_informers.NewSharedInformerFactoryWithOptions(c.cmClient, resyncPeriod, cm_informers.WithNamespace(ns)) + nsi.kubeSharedInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(c.kubeClient, resyncPeriod, kubeinformers.WithNamespace(ns)) + nsi.vsSharedInformerFactory = vsinformers.NewSharedInformerFactoryWithOptions(c.vsClient, resyncPeriod, vsinformers.WithNamespace(ns)) - c.sync = SyncFnFor(c.recorder, c.cmClient, cmLister) + c.addHandlers(nsi) - return c.queue + c.informerGroup[ns] = nsi +} + +func (c *CmController) addHandlers(nsi *namespacedInformer) { + nsi.vsLister = nsi.vsSharedInformerFactory.K8s().V1().VirtualServers().Lister() + nsi.vsSharedInformerFactory.K8s().V1().VirtualServers().Informer().AddEventHandler(&controllerpkg.QueuingEventHandler{ + Queue: c.queue, + }) + c.mustSync = append(c.mustSync, 
nsi.vsSharedInformerFactory.K8s().V1().VirtualServers().Informer().HasSynced) + + nsi.cmSharedInformerFactory.Certmanager().V1().Certificates().Informer().AddEventHandler(&controllerpkg.BlockingEventHandler{ + WorkFunc: certificateHandler(c.queue), + }) + nsi.cmLister = nsi.cmSharedInformerFactory.Certmanager().V1().Certificates().Lister() + c.mustSync = append(c.mustSync, nsi.cmSharedInformerFactory.Certmanager().V1().Certificates().Informer().HasSynced) } func (c *CmController) processItem(ctx context.Context, key string) error { @@ -108,14 +122,11 @@ func (c *CmController) processItem(ctx context.Context, key string) error { runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return err } + nsi := getNamespacedInformer(namespace, c.informerGroup) var vs *conf_v1.VirtualServer - for _, vl := range c.vsLister { - vs, err = vl.VirtualServers(namespace).Get(name) - if err == nil { - break - } - } + vs, err = nsi.vsLister.VirtualServers(namespace).Get(name) + if err != nil { return err } @@ -168,25 +179,22 @@ func NewCmController(opts *CmOpts) *CmController { // Create a cert-manager api client intcl, _ := cm_clientset.NewForConfig(opts.kubeConfig) - var vsSharedInformerFactory []vsinformers.SharedInformerFactory - var cmSharedInformerFactory []cm_informers.SharedInformerFactory - var kubeSharedInformerFactory []kubeinformers.SharedInformerFactory + ig := make(map[string]*namespacedInformer) - for _, ns := range opts.namespace { - cmSharedInformerFactory = append(cmSharedInformerFactory, cm_informers.NewSharedInformerFactoryWithOptions(intcl, resyncPeriod, cm_informers.WithNamespace(ns))) - kubeSharedInformerFactory = append(kubeSharedInformerFactory, kubeinformers.NewSharedInformerFactoryWithOptions(opts.kubeClient, resyncPeriod, kubeinformers.WithNamespace(ns))) - vsSharedInformerFactory = append(vsSharedInformerFactory, vsinformers.NewSharedInformerFactoryWithOptions(opts.vsClient, resyncPeriod, vsinformers.WithNamespace(ns))) + cm := &CmController{ + 
ctx: opts.context, + queue: workqueue.NewNamedRateLimitingQueue(controllerpkg.DefaultItemBasedRateLimiter(), ControllerName), + informerGroup: ig, + recorder: opts.eventRecorder, + cmClient: intcl, + kubeClient: opts.kubeClient, + vsClient: opts.vsClient, } - cm := &CmController{ - ctx: opts.context, - queue: workqueue.NewNamedRateLimitingQueue(controllerpkg.DefaultItemBasedRateLimiter(), ControllerName), - cmSharedInformerFactory: cmSharedInformerFactory, - kubeSharedInformerFactory: kubeSharedInformerFactory, - recorder: opts.eventRecorder, - cmClient: intcl, - vsSharedInformerFactory: vsSharedInformerFactory, + for _, ns := range opts.namespace { + cm.newNamespacedInformer(ns) } + cm.register() return cm } @@ -201,14 +209,10 @@ func (c *CmController) Run(stopCh <-chan struct{}) { glog.Infof("Starting cert-manager control loop") - for _, vif := range c.vsSharedInformerFactory { - go vif.Start(c.ctx.Done()) - } - for _, cif := range c.cmSharedInformerFactory { - go cif.Start(c.ctx.Done()) - } - for _, kif := range c.kubeSharedInformerFactory { - go kif.Start(c.ctx.Done()) + for _, ig := range c.informerGroup { + go ig.vsSharedInformerFactory.Start(c.ctx.Done()) + go ig.cmSharedInformerFactory.Start(c.ctx.Done()) + go ig.kubeSharedInformerFactory.Start(c.ctx.Done()) } // // wait for all the informer caches we depend on are synced glog.V(3).Infof("Waiting for %d caches to sync", len(c.mustSync)) diff --git a/internal/certmanager/cm_controller_test.go b/internal/certmanager/cm_controller_test.go index b7b0da7980..92c9dcaaaf 100644 --- a/internal/certmanager/cm_controller_test.go +++ b/internal/certmanager/cm_controller_test.go @@ -23,19 +23,16 @@ import ( cmapi "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" cmclient "github.com/cert-manager/cert-manager/pkg/client/clientset/versioned" - cm_informers "github.com/cert-manager/cert-manager/pkg/client/informers/externalversions" controllerpkg 
"github.com/cert-manager/cert-manager/pkg/controller" testpkg "github.com/nginxinc/kubernetes-ingress/internal/certmanager/test_files" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/util/workqueue" vsapi "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" k8s_nginx "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned" - vsinformers "github.com/nginxinc/kubernetes-ingress/pkg/client/informers/externalversions" ) func Test_controller_Register(t *testing.T) { @@ -138,15 +135,27 @@ func Test_controller_Register(t *testing.T) { // Certificate event is received then HasSynced has not been setup // properly. + ig := make(map[string]*namespacedInformer) + + nsi := &namespacedInformer{ + cmSharedInformerFactory: b.Context.SharedInformerFactory, + kubeSharedInformerFactory: b.Context.KubeSharedInformerFactory, + vsSharedInformerFactory: b.VsSharedInformerFactory, + } + + ig[""] = nsi + cm := &CmController{ - ctx: b.RootContext, - queue: workqueue.NewNamedRateLimitingQueue(controllerpkg.DefaultItemBasedRateLimiter(), ControllerName), - cmSharedInformerFactory: []cm_informers.SharedInformerFactory{b.FakeCMInformerFactory()}, - kubeSharedInformerFactory: []kubeinformers.SharedInformerFactory{b.FakeKubeInformerFactory()}, - recorder: b.Recorder, - vsSharedInformerFactory: []vsinformers.SharedInformerFactory{b.VsSharedInformerFactory}, + ctx: b.RootContext, + queue: workqueue.NewNamedRateLimitingQueue(controllerpkg.DefaultItemBasedRateLimiter(), ControllerName), + informerGroup: ig, + recorder: b.Recorder, + kubeClient: b.Client, + vsClient: b.VSClient, } + cm.addHandlers(nsi) + queue := cm.register() b.Start() diff --git a/internal/certmanager/helper.go b/internal/certmanager/helper.go index 1b41ddd954..03c60d05b1 100644 
--- a/internal/certmanager/helper.go +++ b/internal/certmanager/helper.go @@ -120,3 +120,21 @@ func translateVsSpec(crt *cmapi.Certificate, vsCmSpec *vsapi.CertManager) error } return nil } + +func getNamespacedInformer(ns string, ig map[string]*namespacedInformer) *namespacedInformer { + var nsi *namespacedInformer + var isGlobalNs bool + var exists bool + + nsi, isGlobalNs = ig[""] + + if !isGlobalNs { + // get the correct namespaced informers + nsi, exists = ig[ns] + if !exists { + // we are not watching this namespace + return nil + } + } + return nsi +} diff --git a/internal/certmanager/sync.go b/internal/certmanager/sync.go index f466adcd23..e4c7cd8e34 100644 --- a/internal/certmanager/sync.go +++ b/internal/certmanager/sync.go @@ -60,7 +60,7 @@ type SyncFn func(context.Context, *vsapi.VirtualServer) error func SyncFnFor( rec record.EventRecorder, cmClient clientset.Interface, - cmLister []cmlisters.CertificateLister, + ig map[string]*namespacedInformer, ) SyncFn { return func(ctx context.Context, vs *vsapi.VirtualServer) error { var err error @@ -75,7 +75,9 @@ func SyncFnFor( return err } - newCrts, updateCrts, err := buildCertificates(cmLister, vs, issuerName, issuerKind, issuerGroup) + nsi := getNamespacedInformer(vs.GetNamespace(), ig) + + newCrts, updateCrts, err := buildCertificates(nsi.cmLister, vs, issuerName, issuerKind, issuerGroup) if err != nil { glog.Errorf("Incorrect cert-manager configuration for VirtualServer resource: %v", err) rec.Eventf(vs, corev1.EventTypeWarning, reasonBadConfig, "Incorrect cert-manager configuration for VirtualServer resource: %s", @@ -106,12 +108,8 @@ func SyncFnFor( } var certs []*cmapi.Certificate - for _, cl := range cmLister { - certs, err = cl.Certificates(vs.GetNamespace()).List(labels.Everything()) - if len(certs) > 0 { - break - } - } + certs, err = nsi.cmLister.Certificates(vs.GetNamespace()).List(labels.Everything()) + if err != nil { return err } @@ -131,7 +129,7 @@ func SyncFnFor( } func buildCertificates( - 
cmLister []cmlisters.CertificateLister, + cmLister cmlisters.CertificateLister, vs *vsapi.VirtualServer, issuerName, issuerKind, issuerGroup string, ) (newCert, update []*cmapi.Certificate, _ error) { @@ -140,12 +138,8 @@ func buildCertificates( var existingCrt *cmapi.Certificate var err error - for _, cl := range cmLister { - existingCrt, err = cl.Certificates(vs.Namespace).Get(vs.Spec.TLS.Secret) - if err == nil { - break - } - } + existingCrt, err = cmLister.Certificates(vs.Namespace).Get(vs.Spec.TLS.Secret) + if !apierrors.IsNotFound(err) && err != nil { return nil, nil, err } diff --git a/internal/certmanager/sync_test.go b/internal/certmanager/sync_test.go index 453f7b1975..b3b662fd14 100644 --- a/internal/certmanager/sync_test.go +++ b/internal/certmanager/sync_test.go @@ -23,7 +23,6 @@ import ( cmapi "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" - cmlisters "github.com/cert-manager/cert-manager/pkg/client/listers/certmanager/v1" "github.com/cert-manager/cert-manager/test/unit/gen" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -491,7 +490,19 @@ func TestSync(t *testing.T) { } b.Init() defer b.Stop() - sync := SyncFnFor(b.Recorder, b.CMClient, []cmlisters.CertificateLister{b.SharedInformerFactory.Certmanager().V1().Certificates().Lister()}) + + ig := make(map[string]*namespacedInformer) + + nsi := &namespacedInformer{ + cmSharedInformerFactory: b.FakeCMInformerFactory(), + kubeSharedInformerFactory: b.FakeKubeInformerFactory(), + vsSharedInformerFactory: b.VsSharedInformerFactory, + cmLister: b.SharedInformerFactory.Certmanager().V1().Certificates().Lister(), + } + + ig[""] = nsi + + sync := SyncFnFor(b.Recorder, b.CMClient, ig) b.Start() err := sync(context.Background(), &test.VirtualServer) From bb3ad14b50a3b43eb1d93cb44e875d76c8eb194a Mon Sep 17 00:00:00 2001 From: Ciara Stacke 
<18287516+ciarams87@users.noreply.github.com> Date: Thu, 10 Nov 2022 15:13:07 +0000 Subject: [PATCH 10/30] Create separate type for namespaced watchers in extdns controller --- internal/externaldns/controller.go | 119 +++++++++++++++++------------ internal/externaldns/sync.go | 17 ++--- internal/externaldns/sync_test.go | 6 +- 3 files changed, 81 insertions(+), 61 deletions(-) diff --git a/internal/externaldns/controller.go b/internal/externaldns/controller.go index 8522e03a64..db3732cd31 100644 --- a/internal/externaldns/controller.go +++ b/internal/externaldns/controller.go @@ -28,15 +28,20 @@ const ( // ExtDNSController represents ExternalDNS controller. type ExtDNSController struct { - vsLister []listersV1.VirtualServerLister - sync SyncFn - ctx context.Context - mustSync []cache.InformerSynced - queue workqueue.RateLimitingInterface - sharedInformerFactory []k8s_nginx_informers.SharedInformerFactory - recorder record.EventRecorder - client k8s_nginx.Interface - extdnslister []extdnslisters.DNSEndpointLister + sync SyncFn + ctx context.Context + mustSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + recorder record.EventRecorder + client k8s_nginx.Interface + informerGroup map[string]*namespacedInformer + resync time.Duration +} + +type namespacedInformer struct { + vsLister listersV1.VirtualServerLister + sharedInformerFactory k8s_nginx_informers.SharedInformerFactory + extdnslister extdnslisters.DNSEndpointLister } // ExtDNSOpts represents config required for building the External DNS Controller. @@ -50,45 +55,44 @@ type ExtDNSOpts struct { // NewController takes external dns config and return a new External DNS Controller. 
func NewController(opts *ExtDNSOpts) *ExtDNSController { - var sharedInformerFactory []k8s_nginx_informers.SharedInformerFactory - for _, ns := range opts.namespace { - sif := k8s_nginx_informers.NewSharedInformerFactoryWithOptions(opts.client, opts.resyncPeriod, k8s_nginx_informers.WithNamespace(ns)) - sharedInformerFactory = append(sharedInformerFactory, sif) + ig := make(map[string]*namespacedInformer) + c := &ExtDNSController{ + ctx: opts.context, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName), + informerGroup: ig, + recorder: opts.eventRecorder, + client: opts.client, + resync: opts.resyncPeriod, } - c := &ExtDNSController{ - ctx: opts.context, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName), - sharedInformerFactory: sharedInformerFactory, - recorder: opts.eventRecorder, - client: opts.client, + for _, ns := range opts.namespace { + c.newNamespacedInformer(ns) } - c.register() + + c.sync = SyncFnFor(c.recorder, c.client, c.informerGroup) return c } -func (c *ExtDNSController) register() workqueue.Interface { - for _, sif := range c.sharedInformerFactory { - c.vsLister = append(c.vsLister, sif.K8s().V1().VirtualServers().Lister()) - c.extdnslister = append(c.extdnslister, sif.Externaldns().V1().DNSEndpoints().Lister()) - - sif.K8s().V1().VirtualServers().Informer().AddEventHandler( - &QueuingEventHandler{ - Queue: c.queue, - }, - ) - - sif.Externaldns().V1().DNSEndpoints().Informer().AddEventHandler(&BlockingEventHandler{ - WorkFunc: externalDNSHandler(c.queue), - }) - - c.mustSync = append(c.mustSync, - sif.K8s().V1().VirtualServers().Informer().HasSynced, - sif.Externaldns().V1().DNSEndpoints().Informer().HasSynced, - ) - } - c.sync = SyncFnFor(c.recorder, c.client, c.extdnslister) - return c.queue +func (c *ExtDNSController) newNamespacedInformer(ns string) { + nsi := namespacedInformer{sharedInformerFactory: 
k8s_nginx_informers.NewSharedInformerFactoryWithOptions(c.client, c.resync, k8s_nginx_informers.WithNamespace(ns))} + nsi.vsLister = nsi.sharedInformerFactory.K8s().V1().VirtualServers().Lister() + nsi.extdnslister = nsi.sharedInformerFactory.Externaldns().V1().DNSEndpoints().Lister() + + nsi.sharedInformerFactory.K8s().V1().VirtualServers().Informer().AddEventHandler( + &QueuingEventHandler{ + Queue: c.queue, + }, + ) + + nsi.sharedInformerFactory.Externaldns().V1().DNSEndpoints().Informer().AddEventHandler(&BlockingEventHandler{ + WorkFunc: externalDNSHandler(c.queue), + }) + + c.mustSync = append(c.mustSync, + nsi.sharedInformerFactory.K8s().V1().VirtualServers().Informer().HasSynced, + nsi.sharedInformerFactory.Externaldns().V1().DNSEndpoints().Informer().HasSynced, + ) + c.informerGroup[ns] = &nsi } // Run sets up the event handlers for types we are interested in, as well @@ -101,8 +105,8 @@ func (c *ExtDNSController) Run(stopCh <-chan struct{}) { glog.Infof("Starting external-dns control loop") - for _, sif := range c.sharedInformerFactory { - go sif.Start(c.ctx.Done()) + for _, ig := range c.informerGroup { + go ig.sharedInformerFactory.Start(c.ctx.Done()) } // wait for all informer caches to be synced @@ -154,12 +158,9 @@ func (c *ExtDNSController) processItem(ctx context.Context, key string) error { return err } var vs *conf_v1.VirtualServer - for _, vl := range c.vsLister { - vs, err = vl.VirtualServers(namespace).Get(name) - if err == nil { - break - } - } + nsi := getNamespacedInformer(namespace, c.informerGroup) + vs, err = nsi.vsLister.VirtualServers(namespace).Get(name) + if err != nil { return err } @@ -209,3 +210,21 @@ func BuildOpts( resyncPeriod: resync, } } + +func getNamespacedInformer(ns string, ig map[string]*namespacedInformer) *namespacedInformer { + var nsi *namespacedInformer + var isGlobalNs bool + var exists bool + + nsi, isGlobalNs = ig[""] + + if !isGlobalNs { + // get the correct namespaced informers + nsi, exists = ig[ns] + if 
!exists { + // we are not watching this namespace + return nil + } + } + return nsi +} diff --git a/internal/externaldns/sync.go b/internal/externaldns/sync.go index debaf6a6c1..cc607cb661 100644 --- a/internal/externaldns/sync.go +++ b/internal/externaldns/sync.go @@ -36,7 +36,7 @@ var vsGVK = vsapi.SchemeGroupVersion.WithKind("VirtualServer") type SyncFn func(context.Context, *vsapi.VirtualServer) error // SyncFnFor knows how to reconcile VirtualServer DNSEndpoint object. -func SyncFnFor(rec record.EventRecorder, client clientset.Interface, extdnsLister []extdnslisters.DNSEndpointLister) SyncFn { +func SyncFnFor(rec record.EventRecorder, client clientset.Interface, ig map[string]*namespacedInformer) SyncFn { return func(ctx context.Context, vs *vsapi.VirtualServer) error { // Do nothing if ExternalDNS is not present (nil) in VS or is not enabled. if !vs.Spec.ExternalDNS.Enable { @@ -56,7 +56,9 @@ func SyncFnFor(rec record.EventRecorder, client clientset.Interface, extdnsListe return err } - newDNSEndpoint, updateDNSEndpoint, err := buildDNSEndpoint(extdnsLister, vs, targets, recordType) + nsi := getNamespacedInformer(vs.Namespace, ig) + + newDNSEndpoint, updateDNSEndpoint, err := buildDNSEndpoint(nsi.extdnslister, vs, targets, recordType) if err != nil { glog.Errorf("error message here %s", err) rec.Eventf(vs, corev1.EventTypeWarning, reasonBadConfig, "Incorrect DNSEndpoint config for VirtualServer resource: %s", err) @@ -136,17 +138,14 @@ func getValidTargets(endpoints []vsapi.ExternalEndpoint) (extdnsapi.Targets, str return targets, recordType, err } -func buildDNSEndpoint(extdnsLister []extdnslisters.DNSEndpointLister, vs *vsapi.VirtualServer, targets extdnsapi.Targets, recordType string) (*extdnsapi.DNSEndpoint, *extdnsapi.DNSEndpoint, error) { +func buildDNSEndpoint(extdnsLister extdnslisters.DNSEndpointLister, vs *vsapi.VirtualServer, targets extdnsapi.Targets, recordType string) (*extdnsapi.DNSEndpoint, *extdnsapi.DNSEndpoint, error) { var 
updateDNSEndpoint *extdnsapi.DNSEndpoint var newDNSEndpoint *extdnsapi.DNSEndpoint var existingDNSEndpoint *extdnsapi.DNSEndpoint var err error - for _, el := range extdnsLister { - existingDNSEndpoint, err = el.DNSEndpoints(vs.Namespace).Get(vs.ObjectMeta.Name) - if err == nil { - break - } - } + + existingDNSEndpoint, err = extdnsLister.DNSEndpoints(vs.Namespace).Get(vs.ObjectMeta.Name) + if !apierrors.IsNotFound(err) && err != nil { return nil, nil, err } diff --git a/internal/externaldns/sync_test.go b/internal/externaldns/sync_test.go index 58ca613a24..812e4a2e66 100644 --- a/internal/externaldns/sync_test.go +++ b/internal/externaldns/sync_test.go @@ -276,8 +276,10 @@ func TestSync_ReturnsErrorOnFailure(t *testing.T) { for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { rec := EventRecorder{} - eplister := []extdnsclient.DNSEndpointLister{DNSEPLister{}} - fn := SyncFnFor(rec, nil, eplister) + ig := make(map[string]*namespacedInformer) + nsi := namespacedInformer{extdnslister: DNSEPLister{}} + ig[""] = &nsi + fn := SyncFnFor(rec, nil, ig) err := fn(context.TODO(), tc.input) if err == nil { t.Error("want error, got nil") From 7ec801c120e9fc18e5f27e9ae58c542bca58ae17 Mon Sep 17 00:00:00 2001 From: Ciara Stacke <18287516+ciarams87@users.noreply.github.com> Date: Thu, 10 Nov 2022 16:35:49 +0000 Subject: [PATCH 11/30] Fix case where namespace is not watching secrets --- internal/k8s/controller.go | 3 +++ internal/k8s/controller_test.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 1e2dc1f01a..4b34be3057 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -866,6 +866,9 @@ func (lbc *LoadBalancerController) updateAllConfigs() { // it will report warnings. 
(See https://github.com/nginxinc/kubernetes-ingress/issues/1448 ) func (lbc *LoadBalancerController) preSyncSecrets() { for _, ni := range lbc.namespacedInformers { + if !ni.isSecretsEnabledNamespace { + break + } objects := ni.secretLister.List() glog.V(3).Infof("PreSync %d Secrets", len(objects)) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 3cf7c883d9..37c9ae02c2 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -2298,7 +2298,7 @@ func TestPreSyncSecrets(t *testing.T) { }, } nsi := make(map[string]*namespacedInformer) - nsi[""] = &namespacedInformer{secretLister: secretLister} + nsi[""] = &namespacedInformer{secretLister: secretLister, isSecretsEnabledNamespace: true} lbc := LoadBalancerController{ isNginxPlus: true, From 2397d33f594eb9700aabe7213e8226246a68645a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Mon, 14 Nov 2022 13:18:07 +0000 Subject: [PATCH 12/30] Change informer --- examples/custom-resources/basic-configuration/cafe.yaml | 4 ++-- internal/k8s/controller.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/custom-resources/basic-configuration/cafe.yaml b/examples/custom-resources/basic-configuration/cafe.yaml index f049e8bf29..1525cd18d6 100644 --- a/examples/custom-resources/basic-configuration/cafe.yaml +++ b/examples/custom-resources/basic-configuration/cafe.yaml @@ -3,7 +3,7 @@ kind: Deployment metadata: name: coffee spec: - replicas: 2 + replicas: 220 selector: matchLabels: app: coffee @@ -36,7 +36,7 @@ kind: Deployment metadata: name: tea spec: - replicas: 1 + replicas: 220 selector: matchLabels: app: tea diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 2e15f68c05..c1d01f2012 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -528,7 +528,7 @@ func (lbc *LoadBalancerController) addEndpointHandler(handlers cache.ResourceEve // 
addEndpointSliceHandlerWithStore adds the handler for EndpointSlices to the controller func (lbc *LoadBalancerController) addEndpointSliceHandlerWithStore(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { - informer := nsi.secretInformerFactory.Discovery().V1().EndpointSlices().Informer() + informer := nsi.sharedInformerFactory.Discovery().V1().EndpointSlices().Informer() informer.AddEventHandler(handlers) var el storeToEndpointSliceLister el.Store = informer.GetStore() From 422b9ca0aeaaaeec747ea21360907452be005da5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Mon, 14 Nov 2022 13:31:20 +0000 Subject: [PATCH 13/30] Revert example config --- examples/custom-resources/basic-configuration/cafe.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/custom-resources/basic-configuration/cafe.yaml b/examples/custom-resources/basic-configuration/cafe.yaml index 1525cd18d6..f049e8bf29 100644 --- a/examples/custom-resources/basic-configuration/cafe.yaml +++ b/examples/custom-resources/basic-configuration/cafe.yaml @@ -3,7 +3,7 @@ kind: Deployment metadata: name: coffee spec: - replicas: 220 + replicas: 2 selector: matchLabels: app: coffee @@ -36,7 +36,7 @@ kind: Deployment metadata: name: tea spec: - replicas: 220 + replicas: 1 selector: matchLabels: app: tea From 45850364cc8f0eaf64102cab33264b14d4d1842d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Mon, 14 Nov 2022 15:44:02 +0000 Subject: [PATCH 14/30] Find endpoints with endpointslices using subselectors --- internal/k8s/controller.go | 53 +++++- internal/k8s/controller_test.go | 321 ++++++++++++++++++++++++++++++++ 2 files changed, 372 insertions(+), 2 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index c1d01f2012..65e6c49568 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3364,6 +3364,18 @@ func (lbc 
*LoadBalancerController) getEndpointsForServiceWithSubselector(targetP return nil, err } + var svcEndpointSlices []discovery_v1.EndpointSlice + svcEndpointSlices, err = nsi.endpointSliceLister.GetServiceEndpointSlices(svc) + if err != nil { + glog.V(3).Infof("Error getting endpointslices for service %s from the cache: %v", svc.Name, err) + return nil, err + } + + if len(svcEndpointSlices) > 0 { + endps = getEndpointsFromEndpointSlicesForSubselectedPods(targetPort, pods, svcEndpointSlices) + return endps, nil + } + endps = getEndpointsBySubselectedPods(targetPort, pods, svcEps) return endps, nil } @@ -3396,6 +3408,45 @@ func getEndpointsBySubselectedPods(targetPort int32, pods []*api_v1.Pod, svcEps return endps } +func getEndpointsFromEndpointSlicesForSubselectedPods(targetPort int32, pods []*api_v1.Pod, svcEndpointSlices []discovery_v1.EndpointSlice) (podEndpoints []podEndpoint) { + endpointSet := make(map[podEndpoint]struct{}) + for _, pod := range pods { + for _, endpointSlice := range svcEndpointSlices { + for _, port := range endpointSlice.Ports { + if *port.Port != targetPort { + continue + } + for _, endpoint := range endpointSlice.Endpoints { + for _, address := range endpoint.Addresses { + if pod.Status.PodIP == address { + addr := ipv6SafeAddrPort(pod.Status.PodIP, targetPort) + ownerType, ownerName := getPodOwnerTypeAndName(pod) + podEndpoint := podEndpoint{ + Address: addr, + PodName: getPodName(endpoint.TargetRef), + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: ownerType, + OwnerName: ownerName, + }, + } + endpointSet[podEndpoint] = struct{}{} + podEndpoints = append(podEndpoints, podEndpoint) + } + } + } + } + } + } + if len(endpointSet) == 0 { + return nil + } + endpoints := make([]podEndpoint, 0, len(endpointSet)) + for ep := range endpointSet { + endpoints = append(endpoints, ep) + } + return endpoints +} + func ipv6SafeAddrPort(addr string, port int32) string { return net.JoinHostPort(addr, strconv.Itoa(int(port))) } @@ -3529,8 +3580,6 @@ func 
(lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlices(endpoin for _, endpointSlice := range endpointSlices { for _, endpointSlicePort := range endpointSlice.Ports { if *endpointSlicePort.Port == targetPort { - // Endpoints may be duplicated across multiple EndpointSlices. - // Using a set to prevent returning duplicate endpoints. for _, endpoint := range endpointSlice.Endpoints { for _, endpointAddress := range endpoint.Addresses { address := ipv6SafeAddrPort(endpointAddress, *endpointSlicePort.Port) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index a4969e87f4..108b1462b1 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -712,6 +712,327 @@ func TestGetEndpointsBySubselectedPods(t *testing.T) { } } +func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { + var endpointPort int32 + endpointPort = 8080 + + boolPointer := func(b bool) *bool { return &b } + tests := []struct { + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + pods []*api_v1.Pod + expectedEps []podEndpoint + }{ + { + desc: "find one pod in one endpointslice", + targetPort: 8080, + expectedEps: []podEndpoint{ + { + Address: "1.2.3.4:8080", + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: "deployment", + OwnerName: "deploy-1", + }, + }, + }, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + }, + }, + { + desc: "find one pod in two endpointslices with duplicate endpoints", + targetPort: 8080, + expectedEps: []podEndpoint{ + { + Address: 
"1.2.3.4:8080", + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: "deployment", + OwnerName: "deploy-1", + }, + }, + }, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + }, + }, + { + desc: "find two pods in one endpointslice", + targetPort: 8080, + expectedEps: []podEndpoint{ + { + Address: "1.2.3.4:8080", + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: "deployment", + OwnerName: "deploy-1", + }, + }, + { + Address: "5.6.7.8:8080", + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: "deployment", + OwnerName: "deploy-1", + }, + }, + }, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "5.6.7.8", + }, + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + { + Addresses: []string{ + "5.6.7.8", + }, + }, + }, + }, + }, + }, + { + desc: "find two pods in two 
endpointslices", + targetPort: 8080, + expectedEps: []podEndpoint{ + { + Address: "1.2.3.4:8080", + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: "deployment", + OwnerName: "deploy-1", + }, + }, + { + Address: "5.6.7.8:8080", + MeshPodOwner: configs.MeshPodOwner{ + OwnerType: "deployment", + OwnerName: "deploy-1", + }, + }, + }, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "5.6.7.8", + }, + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "5.6.7.8", + }, + }, + }, + }, + }, + }, + { + desc: "no pods found", + targetPort: 8080, + expectedEps: nil, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + }, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "5.4.3.2", + }, + }, + }, + }, + }, + }, + { + desc: "targetPort mismatch", + targetPort: 21, + expectedEps: nil, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndps := 
getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, test.pods, test.svcEndpointSlices) + if !reflect.DeepEqual(gotEndps, test.expectedEps) { + t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = %v, want %v", gotEndps, test.expectedEps) + } + }) + } +} + func TestGetStatusFromEventTitle(t *testing.T) { tests := []struct { eventTitle string From 4afe089dbc8bc8233a3bc9fcd874d567d84ee43f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Mon, 14 Nov 2022 15:54:49 +0000 Subject: [PATCH 15/30] Add additional unit test --- internal/k8s/controller_test.go | 77 ++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 36 deletions(-) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 108b1462b1..a171c5fc87 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -608,6 +608,12 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { }, }, }, + { + desc: "no endpoints found", + targetPort: 8080, + expectedEndpointSlices: nil, + svcEndpointSlices: []discovery_v1.EndpointSlice{}, + }, } svc := api_v1.Service{ @@ -645,11 +651,45 @@ func TestGetEndpointsBySubselectedPods(t *testing.T) { desc string targetPort int32 svcEps api_v1.Endpoints + pods []*api_v1.Pod expectedEps []podEndpoint }{ { desc: "find one endpoint", targetPort: 80, + svcEps: api_v1.Endpoints{ + Subsets: []api_v1.EndpointSubset{ + { + Addresses: []api_v1.EndpointAddress{ + { + IP: "1.2.3.4", + Hostname: "asdf.com", + }, + }, + Ports: []api_v1.EndpointPort{ + { + Port: 80, + }, + }, + }, + }, + }, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + }, expectedEps: []podEndpoint{ { Address: "1.2.3.4:80", @@ -667,44 +707,9 @@ func 
TestGetEndpointsBySubselectedPods(t *testing.T) { }, } - pods := []*api_v1.Pod{ - { - ObjectMeta: meta_v1.ObjectMeta{ - OwnerReferences: []meta_v1.OwnerReference{ - { - Kind: "Deployment", - Name: "deploy-1", - Controller: boolPointer(true), - }, - }, - }, - Status: api_v1.PodStatus{ - PodIP: "1.2.3.4", - }, - }, - } - - svcEps := api_v1.Endpoints{ - Subsets: []api_v1.EndpointSubset{ - { - Addresses: []api_v1.EndpointAddress{ - { - IP: "1.2.3.4", - Hostname: "asdf.com", - }, - }, - Ports: []api_v1.EndpointPort{ - { - Port: 80, - }, - }, - }, - }, - } - for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotEndps := getEndpointsBySubselectedPods(test.targetPort, pods, svcEps) + gotEndps := getEndpointsBySubselectedPods(test.targetPort, test.pods, test.svcEps) if !reflect.DeepEqual(gotEndps, test.expectedEps) { t.Errorf("getEndpointsBySubselectedPods() = %v, want %v", gotEndps, test.expectedEps) } From be0c6a1040229fc6cb4cebbcf94079eb6b97f371 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Mon, 14 Nov 2022 16:40:56 +0000 Subject: [PATCH 16/30] Fix linting --- internal/k8s/configuration.go | 20 +++++++------------- internal/k8s/controller.go | 22 ++-------------------- internal/k8s/controller_test.go | 9 ++++----- internal/k8s/handlers.go | 3 ++- internal/k8s/utils.go | 9 ++------- 5 files changed, 17 insertions(+), 46 deletions(-) diff --git a/internal/k8s/configuration.go b/internal/k8s/configuration.go index 303fe566fb..fd4c78bebc 100644 --- a/internal/k8s/configuration.go +++ b/internal/k8s/configuration.go @@ -344,14 +344,13 @@ type Configuration struct { globalConfigurationValidator *validation.GlobalConfigurationValidator transportServerValidator *validation.TransportServerValidator - secretReferenceChecker *secretReferenceChecker - serviceReferenceChecker *serviceReferenceChecker - endpointReferenceChecker *serviceReferenceChecker - endpointSliceReferenceChecker *serviceReferenceChecker //New 
code - policyReferenceChecker *policyReferenceChecker - appPolicyReferenceChecker *appProtectResourceReferenceChecker - appLogConfReferenceChecker *appProtectResourceReferenceChecker - appDosProtectedChecker *dosResourceReferenceChecker + secretReferenceChecker *secretReferenceChecker + serviceReferenceChecker *serviceReferenceChecker + endpointReferenceChecker *serviceReferenceChecker + policyReferenceChecker *policyReferenceChecker + appPolicyReferenceChecker *appProtectResourceReferenceChecker + appLogConfReferenceChecker *appProtectResourceReferenceChecker + appDosProtectedChecker *dosResourceReferenceChecker isPlus bool appProtectEnabled bool @@ -856,11 +855,6 @@ func (c *Configuration) FindResourcesForEndpoints(endpointsNamespace string, end return c.findResourcesForResourceReference(endpointsNamespace, endpointsName, c.endpointReferenceChecker) } -// New Code -func (c *Configuration) FindResourcesForEndpointSlices(endpointSlicesNamespace string, endpointSliceName string) []Resource { - return c.findResourcesForResourceReference(endpointSlicesNamespace, endpointSliceName, c.endpointSliceReferenceChecker) -} - // FindResourcesForSecret finds resources that reference the specified secret. 
func (c *Configuration) FindResourcesForSecret(secretNamespace string, secretName string) []Resource { return c.findResourcesForResourceReference(secretNamespace, secretName, c.secretReferenceChecker) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 65e6c49568..2447c28ddf 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -19,7 +19,6 @@ package k8s import ( "context" "fmt" - discovery_v1 "k8s.io/api/discovery/v1" "net" "strconv" "strings" @@ -54,6 +53,7 @@ import ( "github.com/nginxinc/kubernetes-ingress/internal/metrics/collectors" api_v1 "k8s.io/api/core/v1" + discovery_v1 "k8s.io/api/discovery/v1" networking "k8s.io/api/networking/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -537,23 +537,6 @@ func (lbc *LoadBalancerController) addEndpointSliceHandlerWithStore(handlers cac lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } -// addEndpointSliceHandlerWithIndexer adds the handler for EndpointSlices to the controller -//func (lbc *LoadBalancerController) addEndpointSliceHandlerWithIndexer(handlers cache.ResourceEventHandlerFuncs) { -// for _, sif := range lbc.sharedInformerFactory { -// informer := sif.Discovery().V1().EndpointSlices().Informer() -// indexerErr := informer.AddIndexers("metadata/label....") -// if indexerErr != nil { -// glog.Errorf("unable to add indexer %s for EndpointSlices", "the indexer") -// } -// informer.AddEventHandler(handlers) -// var el indexerToEndpointSliceLister -// el.Indexer = informer.GetIndexer().ByIndex("metadata/label...") -// lbc.endpointSliceLister = append(lbc.endpointSliceLister, el) -// -// lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) -// } -//} - // addConfigMapHandler adds the handler for config maps to the controller func (lbc *LoadBalancerController) addConfigMapHandler(handlers cache.ResourceEventHandlerFuncs, namespace string) { lbc.configMapLister.Store, lbc.configMapController = cache.NewInformer( @@ -3587,8 +3570,7 @@ func 
(lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlices(endpoin Address: address, } if endpoint.TargetRef != nil { - parentType, parentName := - lbc.getPodOwnerTypeAndNameFromAddress(endpoint.TargetRef.Namespace, endpoint.TargetRef.Name) + parentType, parentName := lbc.getPodOwnerTypeAndNameFromAddress(endpoint.TargetRef.Namespace, endpoint.TargetRef.Name) podEndpoint.OwnerType = parentType podEndpoint.OwnerName = parentName podEndpoint.PodName = endpoint.TargetRef.Name diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index a171c5fc87..2facfd8a5e 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -3,12 +3,13 @@ package k8s import ( "errors" "fmt" - discovery_v1 "k8s.io/api/discovery/v1" "reflect" "sort" "strings" "testing" + discovery_v1 "k8s.io/api/discovery/v1" + "github.com/google/go-cmp/cmp" "github.com/nginxinc/kubernetes-ingress/internal/configs" "github.com/nginxinc/kubernetes-ingress/internal/configs/version1" @@ -472,8 +473,7 @@ func TestFormatWarningsMessages(t *testing.T) { } func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { - var endpointPort int32 - endpointPort = 8080 + endpointPort := int32(8080) lbc := LoadBalancerController{ isNginxPlus: true, @@ -718,8 +718,7 @@ func TestGetEndpointsBySubselectedPods(t *testing.T) { } func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { - var endpointPort int32 - endpointPort = 8080 + endpointPort := int32(8080) boolPointer := func(b bool) *bool { return &b } tests := []struct { diff --git a/internal/k8s/handlers.go b/internal/k8s/handlers.go index d756ca2626..326704d10a 100644 --- a/internal/k8s/handlers.go +++ b/internal/k8s/handlers.go @@ -2,10 +2,11 @@ package k8s import ( "fmt" - discovery_v1 "k8s.io/api/discovery/v1" "reflect" "sort" + discovery_v1 "k8s.io/api/discovery/v1" + "github.com/nginxinc/kubernetes-ingress/pkg/apis/dos/v1beta1" "github.com/golang/glog" diff 
--git a/internal/k8s/utils.go b/internal/k8s/utils.go index d7503d839c..226d87c02a 100644 --- a/internal/k8s/utils.go +++ b/internal/k8s/utils.go @@ -18,10 +18,11 @@ package k8s import ( "fmt" - discovery_v1 "k8s.io/api/discovery/v1" "reflect" "strings" + discovery_v1 "k8s.io/api/discovery/v1" + v1 "k8s.io/api/core/v1" networking "k8s.io/api/networking/v1" @@ -93,11 +94,6 @@ type storeToEndpointSliceLister struct { cache.Store } -// Indexer for EndpointSlices -type indexerToEndpointSliceLister struct { - cache.Indexer -} - // GetServiceEndpoints returns the endpoints of a service, matched on service name. func (s *storeToEndpointLister) GetServiceEndpoints(svc *v1.Service) (ep v1.Endpoints, err error) { for _, m := range s.Store.List() { @@ -111,7 +107,6 @@ func (s *storeToEndpointLister) GetServiceEndpoints(svc *v1.Service) (ep v1.Endp // GetServiceEndpoints returns the endpoints of a service, matched on service name. func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) (endpointSlices []discovery_v1.EndpointSlice, err error) { - // -- Code to be used when Indexer is set up //svcNameKey := fmt.Sprintf("%s/%s", svc.Namespace, svc.Name) //epStore, exists, err := s.Store.GetByKey(svcNameKey) From e9b88bcb626ac0aeaafeb1b677ad19c561880fbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 16 Nov 2022 09:21:01 +0000 Subject: [PATCH 17/30] Sync encpointslices for ingress and transport server --- internal/k8s/controller.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 2447c28ddf..609246f569 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -790,11 +790,26 @@ func (lbc *LoadBalancerController) syncEndpointSlices(task task) { } endpointSlice := obj.(*discovery_v1.EndpointSlice) - // Find EndpointSlices based on service name. 
This is mapped for the label `kubernetes.io/service-name`. svcResource := lbc.configuration.FindResourcesForService(endpointSlice.Namespace, endpointSlice.Labels["kubernetes.io/service-name"]) resourceExes := lbc.createExtendedResources(svcResource) + if len(resourceExes.IngressExes) > 0 { + glog.V(3).Infof("Updating EndpointSlices for %v", resourceExes.IngressExes) + err = lbc.configurator.UpdateEndpoints(resourceExes.IngressExes) + if err != nil { + glog.Errorf("Error updating EndpointSlices for %v: %v", resourceExes.IngressExes, err) + } + } + + if len(resourceExes.MergeableIngresses) > 0 { + glog.V(3).Infof("Updating EndpointSlices for %v", resourceExes.MergeableIngresses) + err = lbc.configurator.UpdateEndpointsMergeableIngress(resourceExes.MergeableIngresses) + if err != nil { + glog.Errorf("Error updating EndpointSlices for %v: %v", resourceExes.MergeableIngresses, err) + } + } + if lbc.areCustomResourcesEnabled { if len(resourceExes.VirtualServerExes) > 0 { glog.V(3).Infof("Updating EndpointSlices for %v", resourceExes.VirtualServerExes) From aef189129680d4dcf53d0f2f3f300763ef22d7ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 16 Nov 2022 09:33:52 +0000 Subject: [PATCH 18/30] Remove commentedcode and refactor --- internal/k8s/controller.go | 6 +++--- internal/k8s/utils.go | 19 ++++--------------- 2 files changed, 7 insertions(+), 18 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 609246f569..c64e8b8dae 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -362,7 +362,7 @@ func (lbc *LoadBalancerController) newNamespacedInformer(ns string) { lbc.addIngressHandler(createIngressHandlers(lbc), nsi) lbc.addServiceHandler(createServiceHandlers(lbc), nsi) lbc.addEndpointHandler(createEndpointHandlers(lbc), nsi) - lbc.addEndpointSliceHandlerWithStore(createEndpointSliceHandlers(lbc), nsi) + 
lbc.addEndpointSliceHandler(createEndpointSliceHandlers(lbc), nsi) lbc.addPodHandler(nsi) secretsTweakListOptionsFunc := func(options *meta_v1.ListOptions) { @@ -526,8 +526,8 @@ func (lbc *LoadBalancerController) addEndpointHandler(handlers cache.ResourceEve lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } -// addEndpointSliceHandlerWithStore adds the handler for EndpointSlices to the controller -func (lbc *LoadBalancerController) addEndpointSliceHandlerWithStore(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { +// addEndpointSliceHandler adds the handler for EndpointSlices to the controller +func (lbc *LoadBalancerController) addEndpointSliceHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { informer := nsi.sharedInformerFactory.Discovery().V1().EndpointSlices().Informer() informer.AddEventHandler(handlers) var el storeToEndpointSliceLister diff --git a/internal/k8s/utils.go b/internal/k8s/utils.go index 226d87c02a..f192ca8c68 100644 --- a/internal/k8s/utils.go +++ b/internal/k8s/utils.go @@ -39,7 +39,7 @@ type storeToIngressLister struct { cache.Store } -// GetByKeySafe calls Store.GetByKeySafe and returns a copy of the ingress so it is +// GetByKeySafe calls Store.GetByKeySafe and returns a copy of the ingress, so it is // safe to modify. func (s *storeToIngressLister) GetByKeySafe(key string) (ing *networking.Ingress, exists bool, err error) { item, exists, err := s.Store.GetByKey(key) @@ -71,7 +71,7 @@ func (s *storeToConfigMapLister) List() (cfgm v1.ConfigMapList, err error) { return cfgm, nil } -// indexerToPodLister makes a Indexer that lists Pods. +// indexerToPodLister makes an Indexer that lists Pods. 
type indexerToPodLister struct { cache.Indexer } @@ -105,29 +105,18 @@ func (s *storeToEndpointLister) GetServiceEndpoints(svc *v1.Service) (ep v1.Endp return ep, fmt.Errorf("could not find endpoints for service: %v", svc.Name) } -// GetServiceEndpoints returns the endpoints of a service, matched on service name. +// GetServiceEndpointSlices returns the endpoints of a service, matched on service name. func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) (endpointSlices []discovery_v1.EndpointSlice, err error) { - // -- Code to be used when Indexer is set up - //svcNameKey := fmt.Sprintf("%s/%s", svc.Namespace, svc.Name) - //epStore, exists, err := s.Store.GetByKey(svcNameKey) - //ep = *epStore.(*discovery_v1.EndpointSlice) - //if !exists { - // - //} - //if err != nil { - // - //} for _, epStore := range s.Store.List() { ep := *epStore.(*discovery_v1.EndpointSlice) if svc.Name == ep.Labels["kubernetes.io/service-name"] && svc.Namespace == ep.Namespace { endpointSlices = append(endpointSlices, ep) - // Return a list of endpointslices there can be more than one per service. } } if len(endpointSlices) > 0 { return endpointSlices, nil } - return endpointSlices, fmt.Errorf("could not find endpoints for service: %v", svc.Name) + return endpointSlices, fmt.Errorf("could not find endpointslices for service: %v", svc.Name) } // findPort locates the container port for the given pod and portName. 
If the From 0b687d7f57f8eea511fedcff290c51bad8ac7758 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 16 Nov 2022 11:24:12 +0000 Subject: [PATCH 19/30] Remove useage of core/v1 Endpoints --- .../deployment/nginx-plus-ingress.yaml | 2 +- internal/k8s/controller.go | 177 +----------------- internal/k8s/controller_test.go | 72 ------- internal/k8s/handlers.go | 34 ---- internal/k8s/task_queue.go | 3 - internal/k8s/utils.go | 16 -- 6 files changed, 5 insertions(+), 299 deletions(-) diff --git a/deployments/deployment/nginx-plus-ingress.yaml b/deployments/deployment/nginx-plus-ingress.yaml index 97a0b08dc1..ac0ebd52f5 100644 --- a/deployments/deployment/nginx-plus-ingress.yaml +++ b/deployments/deployment/nginx-plus-ingress.yaml @@ -20,7 +20,7 @@ spec: serviceAccountName: nginx-ingress automountServiceAccountToken: true containers: - - image: nginx-plus-ingress:2.4.1 + - image: nginx/nginx-ingress:2.4.0-SNAPSHOT-aef1891 imagePullPolicy: IfNotPresent name: nginx-plus-ingress ports: diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index c64e8b8dae..de2e8121d0 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -333,7 +333,6 @@ type namespacedInformer struct { dynInformerFactory dynamicinformer.DynamicSharedInformerFactory ingressLister storeToIngressLister svcLister cache.Store - endpointLister storeToEndpointLister endpointSliceLister storeToEndpointSliceLister podLister indexerToPodLister secretLister cache.Store @@ -361,7 +360,6 @@ func (lbc *LoadBalancerController) newNamespacedInformer(ns string) { // create handlers for resources we care about lbc.addIngressHandler(createIngressHandlers(lbc), nsi) lbc.addServiceHandler(createServiceHandlers(lbc), nsi) - lbc.addEndpointHandler(createEndpointHandlers(lbc), nsi) lbc.addEndpointSliceHandler(createEndpointSliceHandlers(lbc), nsi) lbc.addPodHandler(nsi) @@ -515,17 +513,6 @@ func (lbc *LoadBalancerController) 
addIngressHandler(handlers cache.ResourceEven lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) } -// addEndpointHandler adds the handler for endpoints to the controller -func (lbc *LoadBalancerController) addEndpointHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { - informer := nsi.sharedInformerFactory.Core().V1().Endpoints().Informer() - informer.AddEventHandler(handlers) - var el storeToEndpointLister - el.Store = informer.GetStore() - nsi.endpointLister = el - - lbc.cacheSyncs = append(lbc.cacheSyncs, informer.HasSynced) -} - // addEndpointSliceHandler adds the handler for EndpointSlices to the controller func (lbc *LoadBalancerController) addEndpointSliceHandler(handlers cache.ResourceEventHandlerFuncs, nsi *namespacedInformer) { informer := nsi.sharedInformerFactory.Discovery().V1().EndpointSlices().Informer() @@ -711,65 +698,6 @@ func (lbc *LoadBalancerController) getNamespacedInformer(ns string) *namespacedI return nsi } -func (lbc *LoadBalancerController) syncEndpoints(task task) { - key := task.Key - var obj interface{} - var endpExists bool - var err error - glog.V(3).Infof("Syncing endpoints %v", key) - - ns, _, _ := cache.SplitMetaNamespaceKey(key) - obj, endpExists, err = lbc.getNamespacedInformer(ns).endpointLister.GetByKey(key) - - if err != nil { - lbc.syncQueue.Requeue(task, err) - return - } - - if !endpExists { - return - } - - endp := obj.(*api_v1.Endpoints) - resources := lbc.configuration.FindResourcesForEndpoints(endp.Namespace, endp.Name) - - resourceExes := lbc.createExtendedResources(resources) - - if len(resourceExes.IngressExes) > 0 { - glog.V(3).Infof("Updating Endpoints for %v", resourceExes.IngressExes) - err = lbc.configurator.UpdateEndpoints(resourceExes.IngressExes) - if err != nil { - glog.Errorf("Error updating endpoints for %v: %v", resourceExes.IngressExes, err) - } - } - - if len(resourceExes.MergeableIngresses) > 0 { - glog.V(3).Infof("Updating Endpoints for %v", 
resourceExes.MergeableIngresses) - err = lbc.configurator.UpdateEndpointsMergeableIngress(resourceExes.MergeableIngresses) - if err != nil { - glog.Errorf("Error updating endpoints for %v: %v", resourceExes.MergeableIngresses, err) - } - } - - if lbc.areCustomResourcesEnabled { - if len(resourceExes.VirtualServerExes) > 0 { - glog.V(3).Infof("Updating endpoints for %v", resourceExes.VirtualServerExes) - err := lbc.configurator.UpdateEndpointsForVirtualServers(resourceExes.VirtualServerExes) - if err != nil { - glog.Errorf("Error updating endpoints for %v: %v", resourceExes.VirtualServerExes, err) - } - } - - if len(resourceExes.TransportServerExes) > 0 { - glog.V(3).Infof("Updating endpoints for %v", resourceExes.TransportServerExes) - err := lbc.configurator.UpdateEndpointsForTransportServers(resourceExes.TransportServerExes) - if err != nil { - glog.Errorf("Error updating endpoints for %v: %v", resourceExes.TransportServerExes, err) - } - } - } -} - func (lbc *LoadBalancerController) syncEndpointSlices(task task) { key := task.Key var obj interface{} @@ -978,8 +906,6 @@ func (lbc *LoadBalancerController) sync(task task) { lbc.updateTransportServerMetrics() case configMap: lbc.syncConfigMap(task) - case endpoints: - lbc.syncEndpoints(task) case endpointslice: lbc.syncEndpointSlices(task) case secret: @@ -3355,13 +3281,6 @@ func (lbc *LoadBalancerController) getEndpointsForServiceWithSubselector(targetP return nil, fmt.Errorf("error getting pods in namespace %v that match the selector %v: %w", svc.Namespace, labels.Merge(svc.Spec.Selector, subselector), err) } - var svcEps api_v1.Endpoints - svcEps, err = nsi.endpointLister.GetServiceEndpoints(svc) - if err != nil { - glog.V(3).Infof("Error getting endpoints for service %s from the cache: %v", svc.Name, err) - return nil, err - } - var svcEndpointSlices []discovery_v1.EndpointSlice svcEndpointSlices, err = nsi.endpointSliceLister.GetServiceEndpointSlices(svc) if err != nil { @@ -3369,43 +3288,10 @@ func (lbc 
*LoadBalancerController) getEndpointsForServiceWithSubselector(targetP return nil, err } - if len(svcEndpointSlices) > 0 { - endps = getEndpointsFromEndpointSlicesForSubselectedPods(targetPort, pods, svcEndpointSlices) - return endps, nil - } - - endps = getEndpointsBySubselectedPods(targetPort, pods, svcEps) + endps = getEndpointsFromEndpointSlicesForSubselectedPods(targetPort, pods, svcEndpointSlices) return endps, nil } -func getEndpointsBySubselectedPods(targetPort int32, pods []*api_v1.Pod, svcEps api_v1.Endpoints) (endps []podEndpoint) { - for _, pod := range pods { - for _, subset := range svcEps.Subsets { - for _, port := range subset.Ports { - if port.Port != targetPort { - continue - } - for _, address := range subset.Addresses { - if address.IP == pod.Status.PodIP { - addr := ipv6SafeAddrPort(pod.Status.PodIP, targetPort) - ownerType, ownerName := getPodOwnerTypeAndName(pod) - podEnd := podEndpoint{ - Address: addr, - PodName: getPodName(address.TargetRef), - MeshPodOwner: configs.MeshPodOwner{ - OwnerType: ownerType, - OwnerName: ownerName, - }, - } - endps = append(endps, podEnd) - } - } - } - } - } - return endps -} - func getEndpointsFromEndpointSlicesForSubselectedPods(targetPort int32, pods []*api_v1.Pod, svcEndpointSlices []discovery_v1.EndpointSlice) (podEndpoints []podEndpoint) { endpointSet := make(map[podEndpoint]struct{}) for _, pod := range pods { @@ -3520,10 +3406,6 @@ func (lbc *LoadBalancerController) getExternalEndpointsForIngressBackend(backend } func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networking.IngressBackend, svc *api_v1.Service) (result []podEndpoint, isExternal bool, err error) { - // Get Endpoints from api_v1.Endpoints - var endps api_v1.Endpoints - endps, err = lbc.getNamespacedInformer(svc.Namespace).endpointLister.GetServiceEndpoints(svc) - var endpointSlices []discovery_v1.EndpointSlice endpointSlices, err = 
lbc.getNamespacedInformer(svc.Namespace).endpointSliceLister.GetServiceEndpointSlices(svc) @@ -3539,17 +3421,9 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ return nil, false, err } - if len(endpointSlices) > 0 { - podEndpoints, podEndpointsErr := lbc.getEndpointsForPortFromEndpointSlices(endpointSlices, backend.Service.Port, svc) - if podEndpointsErr != nil { - return nil, false, podEndpointsErr - } - return podEndpoints, false, nil - } - - result, err = lbc.getEndpointsForPort(endps, backend.Service.Port, svc) + result, err = lbc.getEndpointsForPortFromEndpointSlices(endpointSlices, backend.Service.Port, svc) if err != nil { - glog.V(3).Infof("Error getting endpoints for service %s port %v: %v", svc.Name, configs.GetBackendPortAsString(backend.Service.Port), err) + glog.V(3).Infof("Error getting endpoitnslices for service %s port %v: %v", svc.Name, configs.GetBackendPortAsString(backend.Service.Port), err) return nil, false, err } return result, false, nil @@ -3597,7 +3471,7 @@ func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlices(endpoin } } if len(endpointSet) == 0 { - return nil, fmt.Errorf("no endpoints for target port %v in service %s", targetPort, svc.Name) + return nil, fmt.Errorf("no endpointslices for target port %v in service %s", targetPort, svc.Name) } endpoints := make([]podEndpoint, 0, len(endpointSet)) for ep := range endpointSet { @@ -3606,49 +3480,6 @@ func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlices(endpoin return endpoints, nil } -func (lbc *LoadBalancerController) getEndpointsForPort(endps api_v1.Endpoints, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { - var targetPort int32 - var err error - - for _, port := range svc.Spec.Ports { - if (backendPort.Name == "" && port.Port == backendPort.Number) || port.Name == backendPort.Name { - targetPort, err = lbc.getTargetPort(port, svc) - if err != nil { - return nil, 
fmt.Errorf("error determining target port for port %v in Ingress: %w", backendPort, err) - } - break - } - } - - if targetPort == 0 { - return nil, fmt.Errorf("no port %v in service %s", backendPort, svc.Name) - } - - for _, subset := range endps.Subsets { - for _, port := range subset.Ports { - if port.Port == targetPort { - var endpoints []podEndpoint - for _, address := range subset.Addresses { - addr := ipv6SafeAddrPort(address.IP, port.Port) - podEnd := podEndpoint{ - Address: addr, - } - if address.TargetRef != nil { - parentType, parentName := lbc.getPodOwnerTypeAndNameFromAddress(address.TargetRef.Namespace, address.TargetRef.Name) - podEnd.OwnerType = parentType - podEnd.OwnerName = parentName - podEnd.PodName = address.TargetRef.Name - } - endpoints = append(endpoints, podEnd) - } - return endpoints, nil - } - } - } - - return nil, fmt.Errorf("no endpoints for target port %v in service %s", targetPort, svc.Name) -} - func (lbc *LoadBalancerController) getPodOwnerTypeAndNameFromAddress(ns, name string) (parentType, parentName string) { var obj interface{} var exists bool diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 2facfd8a5e..490eaaad92 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -645,78 +645,6 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { } } -func TestGetEndpointsBySubselectedPods(t *testing.T) { - boolPointer := func(b bool) *bool { return &b } - tests := []struct { - desc string - targetPort int32 - svcEps api_v1.Endpoints - pods []*api_v1.Pod - expectedEps []podEndpoint - }{ - { - desc: "find one endpoint", - targetPort: 80, - svcEps: api_v1.Endpoints{ - Subsets: []api_v1.EndpointSubset{ - { - Addresses: []api_v1.EndpointAddress{ - { - IP: "1.2.3.4", - Hostname: "asdf.com", - }, - }, - Ports: []api_v1.EndpointPort{ - { - Port: 80, - }, - }, - }, - }, - }, - pods: []*api_v1.Pod{ - { - ObjectMeta: meta_v1.ObjectMeta{ - OwnerReferences: 
[]meta_v1.OwnerReference{ - { - Kind: "Deployment", - Name: "deploy-1", - Controller: boolPointer(true), - }, - }, - }, - Status: api_v1.PodStatus{ - PodIP: "1.2.3.4", - }, - }, - }, - expectedEps: []podEndpoint{ - { - Address: "1.2.3.4:80", - MeshPodOwner: configs.MeshPodOwner{ - OwnerType: "deployment", - OwnerName: "deploy-1", - }, - }, - }, - }, - { - desc: "targetPort mismatch", - targetPort: 21, - expectedEps: nil, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - gotEndps := getEndpointsBySubselectedPods(test.targetPort, test.pods, test.svcEps) - if !reflect.DeepEqual(gotEndps, test.expectedEps) { - t.Errorf("getEndpointsBySubselectedPods() = %v, want %v", gotEndps, test.expectedEps) - } - }) - } -} - func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { endpointPort := int32(8080) diff --git a/internal/k8s/handlers.go b/internal/k8s/handlers.go index 326704d10a..b98b373e07 100644 --- a/internal/k8s/handlers.go +++ b/internal/k8s/handlers.go @@ -62,40 +62,6 @@ func createConfigMapHandlers(lbc *LoadBalancerController, name string) cache.Res } } -// createEndpointHandlers builds the handler funcs for endpoints -func createEndpointHandlers(lbc *LoadBalancerController) cache.ResourceEventHandlerFuncs { - return cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - endpoint := obj.(*v1.Endpoints) - glog.V(3).Infof("Adding endpoints: %v", endpoint.Name) - lbc.AddSyncQueue(obj) - }, - DeleteFunc: func(obj interface{}) { - endpoint, isEndpoint := obj.(*v1.Endpoints) - if !isEndpoint { - deletedState, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.V(3).Infof("Error received unexpected object: %v", obj) - return - } - endpoint, ok = deletedState.Obj.(*v1.Endpoints) - if !ok { - glog.V(3).Infof("Error DeletedFinalStateUnknown contained non-Endpoints object: %v", deletedState.Obj) - return - } - } - glog.V(3).Infof("Removing endpoints: %v", endpoint.Name) - lbc.AddSyncQueue(obj) - }, - UpdateFunc: 
func(old, cur interface{}) { - if !reflect.DeepEqual(old, cur) { - glog.V(3).Infof("Endpoints %v changed, syncing", cur.(*v1.Endpoints).Name) - lbc.AddSyncQueue(cur) - } - }, - } -} - // createEndpointSliceHandlers builds the handler funcs for EndpointSlices func createEndpointSliceHandlers(lbc *LoadBalancerController) cache.ResourceEventHandlerFuncs { return cache.ResourceEventHandlerFuncs{ diff --git a/internal/k8s/task_queue.go b/internal/k8s/task_queue.go index d96541956e..7406b4bbfd 100644 --- a/internal/k8s/task_queue.go +++ b/internal/k8s/task_queue.go @@ -110,7 +110,6 @@ type kind int // resources const ( ingress = iota - endpoints endpointslice configMap secret @@ -141,8 +140,6 @@ func newTask(key string, obj interface{}) (task, error) { switch t := obj.(type) { case *networking.Ingress: k = ingress - case *v1.Endpoints: - k = endpoints case *discovery_v1.EndpointSlice: k = endpointslice case *v1.ConfigMap: diff --git a/internal/k8s/utils.go b/internal/k8s/utils.go index f192ca8c68..3b8618054b 100644 --- a/internal/k8s/utils.go +++ b/internal/k8s/utils.go @@ -84,27 +84,11 @@ func (ipl indexerToPodLister) ListByNamespace(ns string, selector labels.Selecto return pods, err } -// storeToEndpointLister makes a Store that lists Endpoints -type storeToEndpointLister struct { - cache.Store -} - // Store for EndpointSlices type storeToEndpointSliceLister struct { cache.Store } -// GetServiceEndpoints returns the endpoints of a service, matched on service name. -func (s *storeToEndpointLister) GetServiceEndpoints(svc *v1.Service) (ep v1.Endpoints, err error) { - for _, m := range s.Store.List() { - ep = *m.(*v1.Endpoints) - if svc.Name == ep.Name && svc.Namespace == ep.Namespace { - return ep, nil - } - } - return ep, fmt.Errorf("could not find endpoints for service: %v", svc.Name) -} - // GetServiceEndpointSlices returns the endpoints of a service, matched on service name. 
func (s *storeToEndpointSliceLister) GetServiceEndpointSlices(svc *v1.Service) (endpointSlices []discovery_v1.EndpointSlice, err error) { for _, epStore := range s.Store.List() { From c0381d9b4d8dcc6205ed7e9e22524c10e0a57081 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 16 Nov 2022 11:43:47 +0000 Subject: [PATCH 20/30] Update test to ignore order of podEndpoints when comparing --- internal/k8s/controller_test.go | 71 ++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 27 deletions(-) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 490eaaad92..a2f140c438 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -485,15 +485,15 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { } tests := []struct { - desc string - targetPort int32 - svcEndpointSlices []discovery_v1.EndpointSlice - expectedEndpointSlices []podEndpoint + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + expectedEndpoints []podEndpoint }{ { desc: "duplicate endpoints in an endpointslice", targetPort: 8080, - expectedEndpointSlices: []podEndpoint{ + expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", }, @@ -523,7 +523,7 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { { desc: "two different endpoints in one endpoint slice", targetPort: 8080, - expectedEndpointSlices: []podEndpoint{ + expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", }, @@ -556,7 +556,7 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { { desc: "duplicate endpoints across two endpointslices", targetPort: 8080, - expectedEndpointSlices: []podEndpoint{ + expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", }, @@ -609,10 +609,10 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { }, }, { - desc: "no endpoints found", - targetPort: 8080, - expectedEndpointSlices: 
nil, - svcEndpointSlices: []discovery_v1.EndpointSlice{}, + desc: "no endpoints found", + targetPort: 8080, + expectedEndpoints: nil, + svcEndpointSlices: []discovery_v1.EndpointSlice{}, }, } @@ -637,9 +637,9 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { t.Run(test.desc, func(t *testing.T) { gotEndpoints, _ := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &svc) - if !reflect.DeepEqual(gotEndpoints, test.expectedEndpointSlices) { + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", - gotEndpoints, test.expectedEndpointSlices) + gotEndpoints, test.expectedEndpoints) } }) } @@ -654,12 +654,12 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { targetPort int32 svcEndpointSlices []discovery_v1.EndpointSlice pods []*api_v1.Pod - expectedEps []podEndpoint + expectedEndpoints []podEndpoint }{ { desc: "find one pod in one endpointslice", targetPort: 8080, - expectedEps: []podEndpoint{ + expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", MeshPodOwner: configs.MeshPodOwner{ @@ -704,7 +704,7 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { { desc: "find one pod in two endpointslices with duplicate endpoints", targetPort: 8080, - expectedEps: []podEndpoint{ + expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", MeshPodOwner: configs.MeshPodOwner{ @@ -763,7 +763,7 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { { desc: "find two pods in one endpointslice", targetPort: 8080, - expectedEps: []podEndpoint{ + expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", MeshPodOwner: configs.MeshPodOwner{ @@ -834,7 +834,7 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { { desc: "find two pods in two endpointslices", targetPort: 8080, - expectedEps: []podEndpoint{ + expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", MeshPodOwner: 
configs.MeshPodOwner{ @@ -912,9 +912,9 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { }, }, { - desc: "no pods found", - targetPort: 8080, - expectedEps: nil, + desc: "no pods found", + targetPort: 8080, + expectedEndpoints: nil, pods: []*api_v1.Pod{ { ObjectMeta: meta_v1.ObjectMeta{ @@ -949,22 +949,39 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { }, }, { - desc: "targetPort mismatch", - targetPort: 21, - expectedEps: nil, + desc: "targetPort mismatch", + targetPort: 21, + expectedEndpoints: nil, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotEndps := getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, test.pods, test.svcEndpointSlices) - if !reflect.DeepEqual(gotEndps, test.expectedEps) { - t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = %v, want %v", gotEndps, test.expectedEps) + gotEndpoints := getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, test.pods, test.svcEndpointSlices) + + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = got %v, want %v", gotEndpoints, test.expectedEndpoints) } }) } } +func unorderedEqual(got, want []podEndpoint) bool { + if len(got) != len(want) { + return false + } + exists := make(map[string]bool) + for _, value := range got { + exists[value.Address] = true + } + for _, value := range want { + if !exists[value.Address] { + return false + } + } + return true +} + func TestGetStatusFromEventTitle(t *testing.T) { tests := []struct { eventTitle string From 435325b0e52b4463a7debb6a7f133829d5bdc52b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 16 Nov 2022 11:49:52 +0000 Subject: [PATCH 21/30] Revert deployment file --- deployments/deployment/nginx-plus-ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployments/deployment/nginx-plus-ingress.yaml 
b/deployments/deployment/nginx-plus-ingress.yaml index ac0ebd52f5..97a0b08dc1 100644 --- a/deployments/deployment/nginx-plus-ingress.yaml +++ b/deployments/deployment/nginx-plus-ingress.yaml @@ -20,7 +20,7 @@ spec: serviceAccountName: nginx-ingress automountServiceAccountToken: true containers: - - image: nginx/nginx-ingress:2.4.0-SNAPSHOT-aef1891 + - image: nginx-plus-ingress:2.4.1 imagePullPolicy: IfNotPresent name: nginx-plus-ingress ports: From a27324d734c5d6d2084d3c74d4d8d1ca975cb6ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 16 Nov 2022 13:24:46 +0000 Subject: [PATCH 22/30] Remove endpoints from rbac --- deployments/rbac/rbac.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deployments/rbac/rbac.yaml b/deployments/rbac/rbac.yaml index 57ed7e9baf..af4b44e6eb 100644 --- a/deployments/rbac/rbac.yaml +++ b/deployments/rbac/rbac.yaml @@ -15,7 +15,6 @@ rules: - "" resources: - services - - endpoints verbs: - get - list From b10c8e4838d438fa881bca4f281f7a14723c189a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Wed, 16 Nov 2022 14:18:48 +0000 Subject: [PATCH 23/30] Update rbac for helm --- deployments/helm-chart/templates/rbac.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deployments/helm-chart/templates/rbac.yaml b/deployments/helm-chart/templates/rbac.yaml index ad95710a86..f1070842ec 100644 --- a/deployments/helm-chart/templates/rbac.yaml +++ b/deployments/helm-chart/templates/rbac.yaml @@ -30,11 +30,18 @@ rules: - watch - list {{- end }} +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch - apiGroups: - "" resources: - services - - endpoints verbs: - get - list From a4a02014281da4adcd596d915d8834c1bf43f4ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Mon, 21 Nov 2022 11:22:46 +0000 Subject: [PATCH 24/30] Fix typo 
--- internal/k8s/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index de2e8121d0..82c82de835 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3423,7 +3423,7 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ result, err = lbc.getEndpointsForPortFromEndpointSlices(endpointSlices, backend.Service.Port, svc) if err != nil { - glog.V(3).Infof("Error getting endpoitnslices for service %s port %v: %v", svc.Name, configs.GetBackendPortAsString(backend.Service.Port), err) + glog.V(3).Infof("Error getting endpointslices for service %s port %v: %v", svc.Name, configs.GetBackendPortAsString(backend.Service.Port), err) return nil, false, err } return result, false, nil From a5ddf0450cb6a9b20ba379ce48b2cbaa9950c7f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Tue, 22 Nov 2022 09:45:08 +0000 Subject: [PATCH 25/30] Add tests for targetPort being 0 --- internal/k8s/controller.go | 15 +++--- internal/k8s/controller_test.go | 92 +++++++++++++++++++++++++++------ 2 files changed, 82 insertions(+), 25 deletions(-) diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index 82c82de835..6c242739dc 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -3429,25 +3429,24 @@ func (lbc *LoadBalancerController) getEndpointsForIngressBackend(backend *networ return result, false, nil } -func (lbc *LoadBalancerController) validateTargetPort(backendPort networking.ServiceBackendPort, svc *api_v1.Service) (targetPort int32, err error) { +func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlices(endpointSlices []discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { + var targetPort int32 + var err error + for _, port := range svc.Spec.Ports { if (backendPort.Name == "" && port.Port == 
backendPort.Number) || port.Name == backendPort.Name { targetPort, err = lbc.getTargetPort(port, svc) if err != nil { - return 0, fmt.Errorf("error determining target port for port %v in Ingress: %w", backendPort, err) + return nil, fmt.Errorf("error determining target port for port %v in Ingress: %w", backendPort, err) } break } } - return targetPort, nil -} - -func (lbc *LoadBalancerController) getEndpointsForPortFromEndpointSlices(endpointSlices []discovery_v1.EndpointSlice, backendPort networking.ServiceBackendPort, svc *api_v1.Service) ([]podEndpoint, error) { - targetPort, err := lbc.validateTargetPort(backendPort, svc) - if err != nil { + if targetPort == 0 { return nil, fmt.Errorf("no port %v in service %s", backendPort, svc.Name) } + endpointSet := make(map[podEndpoint]struct{}) for _, endpointSlice := range endpointSlices { for _, endpointSlicePort := range endpointSlice.Ports { diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index a2f140c438..80e3a046a4 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -472,7 +472,7 @@ func TestFormatWarningsMessages(t *testing.T) { } } -func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { +func TestGetEndpointsFromEndpointSlices(t *testing.T) { endpointPort := int32(8080) lbc := LoadBalancerController{ @@ -487,12 +487,30 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { tests := []struct { desc string targetPort int32 + svc api_v1.Service svcEndpointSlices []discovery_v1.EndpointSlice expectedEndpoints []podEndpoint }{ { desc: "duplicate endpoints in an endpointslice", targetPort: 8080, + svc: api_v1.Service{ + TypeMeta: meta_v1.TypeMeta{}, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "coffee-svc", + Namespace: "default", + }, + Spec: api_v1.ServiceSpec{ + Ports: []api_v1.ServicePort{ + { + Name: "foo", + Port: 80, + TargetPort: intstr.FromInt(8080), + }, + }, + }, + Status: api_v1.ServiceStatus{}, + }, 
expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", @@ -523,6 +541,23 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { { desc: "two different endpoints in one endpoint slice", targetPort: 8080, + svc: api_v1.Service{ + TypeMeta: meta_v1.TypeMeta{}, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "coffee-svc", + Namespace: "default", + }, + Spec: api_v1.ServiceSpec{ + Ports: []api_v1.ServicePort{ + { + Name: "foo", + Port: 80, + TargetPort: intstr.FromInt(8080), + }, + }, + }, + Status: api_v1.ServiceStatus{}, + }, expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", @@ -556,6 +591,23 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { { desc: "duplicate endpoints across two endpointslices", targetPort: 8080, + svc: api_v1.Service{ + TypeMeta: meta_v1.TypeMeta{}, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "coffee-svc", + Namespace: "default", + }, + Spec: api_v1.ServiceSpec{ + Ports: []api_v1.ServicePort{ + { + Name: "foo", + Port: 80, + TargetPort: intstr.FromInt(8080), + }, + }, + }, + Status: api_v1.ServiceStatus{}, + }, expectedEndpoints: []podEndpoint{ { Address: "1.2.3.4:8080", @@ -614,28 +666,34 @@ func TestGetEndpointsFromEndpointSlicesDeduplication(t *testing.T) { expectedEndpoints: nil, svcEndpointSlices: []discovery_v1.EndpointSlice{}, }, - } - - svc := api_v1.Service{ - TypeMeta: meta_v1.TypeMeta{}, - ObjectMeta: meta_v1.ObjectMeta{ - Name: "coffee-svc", - Namespace: "default", - }, - Spec: api_v1.ServiceSpec{ - Ports: []api_v1.ServicePort{ - { - Name: "foo", - Port: 80, - TargetPort: intstr.FromInt(8080), + { + desc: "service target port is 0", + targetPort: 8080, + svc: api_v1.Service{ + TypeMeta: meta_v1.TypeMeta{}, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "coffee-svc", + Namespace: "default", }, + Spec: api_v1.ServiceSpec{ + Ports: []api_v1.ServicePort{ + { + Name: "foo", + Port: 0, + TargetPort: intstr.FromInt(0), + }, + }, + }, + Status: api_v1.ServiceStatus{}, }, + expectedEndpoints: nil, + 
svcEndpointSlices: nil, }, - Status: api_v1.ServiceStatus{}, } + for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotEndpoints, _ := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &svc) + gotEndpoints, _ := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", From 8b0f615eb3c294f61a4f310d03a5e77cd701fb42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Tue, 22 Nov 2022 11:16:07 +0000 Subject: [PATCH 26/30] Update unit tests --- internal/k8s/controller_test.go | 104 +++++++++++++++++++++++++++++--- 1 file changed, 94 insertions(+), 10 deletions(-) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 80e3a046a4..c64043f6ee 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -660,10 +660,60 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { }, }, }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) + if err != nil { + t.Fatal(err) + } + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", + gotEndpoints, test.expectedEndpoints) + } + }) + } +} + +func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { + lbc := LoadBalancerController{ + isNginxPlus: true, + } + + backendServicePort := networking.ServiceBackendPort{ + Number: 8080, + Name: "foo", + } + + tests := []struct { + desc string + targetPort int32 + svc api_v1.Service + svcEndpointSlices []discovery_v1.EndpointSlice + expectedErr string + }{ { - desc: "no endpoints found", - targetPort: 8080, - 
expectedEndpoints: nil, + desc: "no endpoints found", + targetPort: 8080, + svc: api_v1.Service{ + TypeMeta: meta_v1.TypeMeta{}, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "coffee-svc", + Namespace: "default", + }, + Spec: api_v1.ServiceSpec{ + Ports: []api_v1.ServicePort{ + { + Name: "foo", + Port: 80, + TargetPort: intstr.FromInt(8080), + }, + }, + }, + Status: api_v1.ServiceStatus{}, + }, + expectedErr: "no endpointslices for target port 8080 in service coffee-svc", svcEndpointSlices: []discovery_v1.EndpointSlice{}, }, { @@ -686,18 +736,20 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { }, Status: api_v1.ServiceStatus{}, }, - expectedEndpoints: nil, + expectedErr: "no port {foo 8080} in service coffee-svc", svcEndpointSlices: nil, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotEndpoints, _ := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) - - if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + gotEndpoints, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) + if err == nil { + t.Fatal(err) + } + if result := cmp.Diff(err.Error(), test.expectedErr); result != "" { t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", - gotEndpoints, test.expectedEndpoints) + gotEndpoints, test.expectedErr) } }) } @@ -1007,8 +1059,40 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { }, }, { - desc: "targetPort mismatch", - targetPort: 21, + desc: "targetPort mismatch", + targetPort: 21, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + }, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: 
boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + }, expectedEndpoints: nil, }, } From ff0aaf3f23be864e1ea74fc9eeb06c8a5b423604 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Tue, 22 Nov 2022 11:26:36 +0000 Subject: [PATCH 27/30] Remove unused target port --- internal/k8s/controller_test.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index c64043f6ee..8ca9c8f90c 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -486,14 +486,12 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { tests := []struct { desc string - targetPort int32 svc api_v1.Service svcEndpointSlices []discovery_v1.EndpointSlice expectedEndpoints []podEndpoint }{ { - desc: "duplicate endpoints in an endpointslice", - targetPort: 8080, + desc: "duplicate endpoints in an endpointslice", svc: api_v1.Service{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ @@ -539,8 +537,7 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { }, }, { - desc: "two different endpoints in one endpoint slice", - targetPort: 8080, + desc: "two different endpoints in one endpoint slice", svc: api_v1.Service{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ @@ -589,8 +586,7 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { }, }, { - desc: "duplicate endpoints across two endpointslices", - targetPort: 8080, + desc: "duplicate endpoints across two endpointslices", svc: api_v1.Service{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ @@ -688,14 +684,12 @@ func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { tests := []struct { desc string - targetPort int32 svc api_v1.Service svcEndpointSlices []discovery_v1.EndpointSlice expectedErr string }{ { - desc: "no endpoints found", - targetPort: 8080, + desc: "no endpoints found", svc: 
api_v1.Service{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ @@ -717,8 +711,7 @@ func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { svcEndpointSlices: []discovery_v1.EndpointSlice{}, }, { - desc: "service target port is 0", - targetPort: 8080, + desc: "service target port is 0", svc: api_v1.Service{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ From 78929a540d59079f4a406e94ba2311edbaf19eb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Tue, 22 Nov 2022 11:42:06 +0000 Subject: [PATCH 28/30] Update error message for TestGetEndpointsFromEndpointSlicesErrors --- internal/k8s/controller_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 8ca9c8f90c..6a5cdf2d98 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -736,13 +736,13 @@ func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotEndpoints, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) + _, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) if err == nil { t.Fatal(err) } if result := cmp.Diff(err.Error(), test.expectedErr); result != "" { t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", - gotEndpoints, test.expectedErr) + err.Error(), test.expectedErr) } }) } From 57436172aa0860f3d41bf7e825917978b6626523 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Tue, 22 Nov 2022 15:33:25 +0000 Subject: [PATCH 29/30] Refactor tests for endpointslices --- internal/k8s/controller_test.go | 309 +++++++++++++++++++++++++++++--- 1 file changed, 289 insertions(+), 20 deletions(-) diff --git a/internal/k8s/controller_test.go 
b/internal/k8s/controller_test.go index 6a5cdf2d98..47da4f06f0 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -472,7 +472,7 @@ func TestFormatWarningsMessages(t *testing.T) { } } -func TestGetEndpointsFromEndpointSlices(t *testing.T) { +func TestGetEndpointsFromEndpointSlices_DuplicateEndpointsInOneEndpointSlice(t *testing.T) { endpointPort := int32(8080) lbc := LoadBalancerController{ @@ -536,6 +536,40 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { }, }, }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) + if err != nil { + t.Fatal(err) + } + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", + gotEndpoints, test.expectedEndpoints) + } + }) + } +} + +func TestGetEndpointsFromEndpointSlices_TwoDifferentEndpointsInOnEndpointSlice(t *testing.T) { + endpointPort := int32(8080) + + lbc := LoadBalancerController{ + isNginxPlus: true, + } + + backendServicePort := networking.ServiceBackendPort{ + Number: 8080, + Name: "foo", + } + + tests := []struct { + desc string + svc api_v1.Service + svcEndpointSlices []discovery_v1.EndpointSlice + expectedEndpoints []podEndpoint + }{ { desc: "two different endpoints in one endpoint slice", svc: api_v1.Service{ @@ -585,6 +619,40 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { }, }, }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) + if err != nil { + t.Fatal(err) + } + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", + gotEndpoints, test.expectedEndpoints) + } + }) + } +} + +func 
TestGetEndpointsFromEndpointSlices_DuplicateEndpointsAcrossTwoEndpointSlices(t *testing.T) { + endpointPort := int32(8080) + + lbc := LoadBalancerController{ + isNginxPlus: true, + } + + backendServicePort := networking.ServiceBackendPort{ + Number: 8080, + Name: "foo", + } + + tests := []struct { + desc string + svc api_v1.Service + svcEndpointSlices []discovery_v1.EndpointSlice + expectedEndpoints []podEndpoint + }{ { desc: "duplicate endpoints across two endpointslices", svc: api_v1.Service{ @@ -672,7 +740,9 @@ func TestGetEndpointsFromEndpointSlices(t *testing.T) { } } -func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { +func TestGetEndpointsFromEndpointSlices_ErrorsOnInvalidTargetPort(t *testing.T) { + endpointPort := int32(8080) + lbc := LoadBalancerController{ isNginxPlus: true, } @@ -686,10 +756,9 @@ func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { desc string svc api_v1.Service svcEndpointSlices []discovery_v1.EndpointSlice - expectedErr string }{ { - desc: "no endpoints found", + desc: "Target Port should be 0", svc: api_v1.Service{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ @@ -700,18 +769,65 @@ func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { Ports: []api_v1.ServicePort{ { Name: "foo", - Port: 80, - TargetPort: intstr.FromInt(8080), + Port: 0, + TargetPort: intstr.FromInt(0), }, }, }, Status: api_v1.ServiceStatus{}, }, - expectedErr: "no endpointslices for target port 8080 in service coffee-svc", - svcEndpointSlices: []discovery_v1.EndpointSlice{}, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + { + Addresses: []string{ + "5.6.7.8", + }, + }, + }, + }, + }, }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + _, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, 
backendServicePort, &test.svc) + if err == nil { + t.Logf("%s but was %+v\n", test.desc, test.svc.Spec.Ports[0].TargetPort.IntVal) + t.Fatal("want error, got nil") + } + }) + } +} + +func TestGetEndpointsFromEndpointSlices_ErrorsOnNoEndpointSlicesFound(t *testing.T) { + lbc := LoadBalancerController{ + isNginxPlus: true, + } + + backendServicePort := networking.ServiceBackendPort{ + Number: 8080, + Name: "foo", + } + + tests := []struct { + desc string + svc api_v1.Service + svcEndpointSlices []discovery_v1.EndpointSlice + }{ { - desc: "service target port is 0", + desc: "No EndpointSlices should be found", svc: api_v1.Service{ TypeMeta: meta_v1.TypeMeta{}, ObjectMeta: meta_v1.ObjectMeta{ @@ -722,15 +838,14 @@ func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { Ports: []api_v1.ServicePort{ { Name: "foo", - Port: 0, - TargetPort: intstr.FromInt(0), + Port: 80, + TargetPort: intstr.FromInt(8080), }, }, }, Status: api_v1.ServiceStatus{}, }, - expectedErr: "no port {foo 8080} in service coffee-svc", - svcEndpointSlices: nil, + svcEndpointSlices: []discovery_v1.EndpointSlice{}, }, } @@ -738,17 +853,14 @@ func TestGetEndpointsFromEndpointSlicesErrors(t *testing.T) { t.Run(test.desc, func(t *testing.T) { _, err := lbc.getEndpointsForPortFromEndpointSlices(test.svcEndpointSlices, backendServicePort, &test.svc) if err == nil { - t.Fatal(err) - } - if result := cmp.Diff(err.Error(), test.expectedErr); result != "" { - t.Errorf("lbc.getEndpointsForPortFromEndpointSlices() got %v, want %v", - err.Error(), test.expectedErr) + t.Logf("%s but got %+v\n", test.desc, test.svcEndpointSlices) + t.Fatal("want error, got nil") } }) } } -func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { +func TestGetEndpointSlicesBySubselectedPods_FindOnePodInOneEndpointSlice(t *testing.T) { endpointPort := int32(8080) boolPointer := func(b bool) *bool { return &b } @@ -804,6 +916,30 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { }, }, }, + } + + for _, test := range 
tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints := getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, test.pods, test.svcEndpointSlices) + + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = got %v, want %v", gotEndpoints, test.expectedEndpoints) + } + }) + } +} + +func TestGetEndpointSlicesBySubselectedPods_FindOnePodInTwoEndpointSlicesWithDuplicateEndpoints(t *testing.T) { + endpointPort := int32(8080) + + boolPointer := func(b bool) *bool { return &b } + tests := []struct { + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + pods []*api_v1.Pod + expectedEndpoints []podEndpoint + }{ { desc: "find one pod in two endpointslices with duplicate endpoints", targetPort: 8080, @@ -863,6 +999,30 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { }, }, }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints := getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, test.pods, test.svcEndpointSlices) + + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = got %v, want %v", gotEndpoints, test.expectedEndpoints) + } + }) + } +} + +func TestGetEndpointSlicesBySubselectedPods_FindTwoPodsInOneEndpointSlice(t *testing.T) { + endpointPort := int32(8080) + + boolPointer := func(b bool) *bool { return &b } + tests := []struct { + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + pods []*api_v1.Pod + expectedEndpoints []podEndpoint + }{ { desc: "find two pods in one endpointslice", targetPort: 8080, @@ -934,6 +1094,30 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { }, }, }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints := getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, 
test.pods, test.svcEndpointSlices) + + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = got %v, want %v", gotEndpoints, test.expectedEndpoints) + } + }) + } +} + +func TestGetEndpointSlicesBySubselectedPods_FindTwoPodsInTwoEndpointSlice(t *testing.T) { + endpointPort := int32(8080) + + boolPointer := func(b bool) *bool { return &b } + tests := []struct { + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + pods []*api_v1.Pod + expectedEndpoints []podEndpoint + }{ { desc: "find two pods in two endpointslices", targetPort: 8080, @@ -1014,8 +1198,32 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { }, }, }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints := getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, test.pods, test.svcEndpointSlices) + + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = got %v, want %v", gotEndpoints, test.expectedEndpoints) + } + }) + } +} + +func TestGetEndpointSlicesBySubselectedPods_FindNoPods(t *testing.T) { + endpointPort := int32(8080) + + boolPointer := func(b bool) *bool { return &b } + tests := []struct { + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + pods []*api_v1.Pod + expectedEndpoints []podEndpoint + }{ { - desc: "no pods found", + desc: "find no pods", targetPort: 8080, expectedEndpoints: nil, pods: []*api_v1.Pod{ @@ -1101,6 +1309,67 @@ func TestGetEndpointSlicesBySubselectedPods(t *testing.T) { } } +func TestGetEndpointSlicesBySubselectedPods_TargetPortMismatch(t *testing.T) { + endpointPort := int32(8080) + + boolPointer := func(b bool) *bool { return &b } + tests := []struct { + desc string + targetPort int32 + svcEndpointSlices []discovery_v1.EndpointSlice + pods []*api_v1.Pod + expectedEndpoints 
[]podEndpoint + }{ + { + desc: "targetPort mismatch", + targetPort: 21, + svcEndpointSlices: []discovery_v1.EndpointSlice{ + { + Ports: []discovery_v1.EndpointPort{ + { + Port: &endpointPort, + }, + }, + Endpoints: []discovery_v1.Endpoint{ + { + Addresses: []string{ + "1.2.3.4", + }, + }, + }, + }, + }, + pods: []*api_v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + OwnerReferences: []meta_v1.OwnerReference{ + { + Kind: "Deployment", + Name: "deploy-1", + Controller: boolPointer(true), + }, + }, + }, + Status: api_v1.PodStatus{ + PodIP: "1.2.3.4", + }, + }, + }, + expectedEndpoints: nil, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotEndpoints := getEndpointsFromEndpointSlicesForSubselectedPods(test.targetPort, test.pods, test.svcEndpointSlices) + + if result := unorderedEqual(gotEndpoints, test.expectedEndpoints); !result { + t.Errorf("getEndpointsFromEndpointSlicesForSubselectedPods() = got %v, want %v", gotEndpoints, test.expectedEndpoints) + } + }) + } +} + func unorderedEqual(got, want []podEndpoint) bool { if len(got) != len(want) { return false From a07d4abc42739b7151020d1f53d67623a95593ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Cshaun-nx=E2=80=9D?= <“s.odonovan@f5.com”> Date: Tue, 22 Nov 2022 15:48:09 +0000 Subject: [PATCH 30/30] Fix function names --- internal/k8s/controller_test.go | 39 +-------------------------------- 1 file changed, 1 insertion(+), 38 deletions(-) diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 47da4f06f0..4a308c27c1 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -1107,7 +1107,7 @@ func TestGetEndpointSlicesBySubselectedPods_FindTwoPodsInOneEndpointSlice(t *tes } } -func TestGetEndpointSlicesBySubselectedPods_FindTwoPodsInTwoEndpointSlice(t *testing.T) { +func TestGetEndpointSlicesBySubselectedPods_FindTwoPodsInTwoEndpointSlices(t *testing.T) { endpointPort := int32(8080) boolPointer := func(b bool) *bool { return 
&b } @@ -1259,43 +1259,6 @@ func TestGetEndpointSlicesBySubselectedPods_FindNoPods(t *testing.T) { }, }, }, - { - desc: "targetPort mismatch", - targetPort: 21, - svcEndpointSlices: []discovery_v1.EndpointSlice{ - { - Ports: []discovery_v1.EndpointPort{ - { - Port: &endpointPort, - }, - }, - Endpoints: []discovery_v1.Endpoint{ - { - Addresses: []string{ - "1.2.3.4", - }, - }, - }, - }, - }, - pods: []*api_v1.Pod{ - { - ObjectMeta: meta_v1.ObjectMeta{ - OwnerReferences: []meta_v1.OwnerReference{ - { - Kind: "Deployment", - Name: "deploy-1", - Controller: boolPointer(true), - }, - }, - }, - Status: api_v1.PodStatus{ - PodIP: "1.2.3.4", - }, - }, - }, - expectedEndpoints: nil, - }, } for _, test := range tests {