diff --git a/config/crd/bases/k8s.nginx.org_policies.yaml b/config/crd/bases/k8s.nginx.org_policies.yaml index de6bef324b..7fc24a6fae 100644 --- a/config/crd/bases/k8s.nginx.org_policies.yaml +++ b/config/crd/bases/k8s.nginx.org_policies.yaml @@ -172,6 +172,8 @@ spec: type: string rejectCode: type: integer + scale: + type: boolean zoneSize: type: string type: object diff --git a/deploy/crds.yaml b/deploy/crds.yaml index aed959d337..10d5400dd5 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -374,6 +374,8 @@ spec: type: string rejectCode: type: integer + scale: + type: boolean zoneSize: type: string type: object diff --git a/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md b/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md index 5c96acc8e6..bd28023273 100644 --- a/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md +++ b/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md @@ -187,6 +187,7 @@ The table below summarizes the available annotations. |``nginx.org/limit-req-dry-run`` | N/A | Enables the dry run mode. In this mode, the rate limit is not actually applied, but the number of excessive requests is accounted as usual in the shared memory zone. | false | true | |``nginx.org/limit-req-log-level`` | N/A | Sets the desired logging level for cases when the server refuses to process requests due to rate exceeding, or delays request processing. Allowed values are info, notice, warn or error. | error | info | |``nginx.org/limit-req-reject-code`` | N/A | Sets the status code to return in response to rejected requests. Must fall into the range 400..599. | 429 | 503 | +|``nginx.org/limit-req-scale`` | N/A | Enables a constant rate limit by dividing the configured rate by the number of nginx-ingress pods currently serving traffic. This adjustment keeps the rate limit consistent even as the number of nginx-ingress pods fluctuates due to autoscaling. Note: This will not work properly if requests from a client are not evenly distributed across all ingress pods (sticky sessions, long-lived TCP connections carrying many requests, etc.). In such cases, using the NGINX Plus zone-sync feature instead would give better results. | false | true | {{% /table %}} ### Snippets and Custom Templates diff --git a/docs/content/configuration/policy-resource.md b/docs/content/configuration/policy-resource.md index 35c8962d07..5b96c58f27 100644 --- a/docs/content/configuration/policy-resource.md +++ b/docs/content/configuration/policy-resource.md @@ -124,6 +124,7 @@ rateLimit: |``dryRun`` | Enables the dry run mode. In this mode, the rate limit is not actually applied, but the number of excessive requests is accounted as usual in the shared memory zone. | ``bool`` | No | |``logLevel`` | Sets the desired logging level for cases when the server refuses to process requests due to rate exceeding, or delays request processing. Allowed values are ``info``, ``notice``, ``warn`` or ``error``. Default is ``error``. | ``string`` | No | |``rejectCode`` | Sets the status code to return in response to rejected requests. Must fall into the range ``400..599``. Default is ``503``. | ``int`` | No | +|``scale`` | Enables a constant rate limit by dividing the configured rate by the number of nginx-ingress pods currently serving traffic. This adjustment keeps the rate limit consistent even as the number of nginx-ingress pods fluctuates due to autoscaling. Note: This will not work properly if requests from a client are not evenly distributed across all ingress pods (sticky sessions, long-lived TCP connections carrying many requests, etc.). In such cases, using the NGINX Plus zone-sync feature instead would give better results. | ``bool`` | No | {{% /table %}} > For each policy referenced in a VirtualServer and/or its VirtualServerRoutes, NGINX Ingress Controller will generate a single rate limiting zone defined by the [`limit_req_zone`](http://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) directive. If two VirtualServer resources reference the same policy, NGINX Ingress Controller will generate two different rate limiting zones, one zone per VirtualServer. diff --git a/internal/configs/annotations.go b/internal/configs/annotations.go index b0eb8d2291..f678b6d7c0 100644 --- a/internal/configs/annotations.go +++ b/internal/configs/annotations.go @@ -90,6 +90,7 @@ var minionInheritanceList = map[string]bool{ "nginx.org/limit-req-dry-run": true, "nginx.org/limit-req-log-level": true, "nginx.org/limit-req-reject-code": true, + "nginx.org/limit-req-scale": true, } var validPathRegex = map[string]bool{ @@ -510,6 +511,13 @@ func parseRateLimitAnnotations(annotations map[string]string, cfgParams *ConfigP cfgParams.LimitReqRejectCode = requestRateRejectCode } } + if requestRateScale, exists, err := GetMapKeyAsBool(annotations, "nginx.org/limit-req-scale", context); exists { + if err != nil { + errors = append(errors, err) + } else { + cfgParams.LimitReqScale = requestRateScale + } + } return errors } diff --git a/internal/configs/config_params.go b/internal/configs/config_params.go index 28a6654ce1..ce011400bc 100644 --- a/internal/configs/config_params.go +++ b/internal/configs/config_params.go @@ -125,6 +125,7 @@ type ConfigParams struct { LimitReqDryRun bool LimitReqLogLevel string LimitReqRejectCode int + LimitReqScale bool } // StaticConfigParams holds immutable NGINX configuration parameters that affect the main NGINX config.
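For reference, the annotation parsing above follows the controller's usual `(value, exists, err)` helper contract. Below is a minimal, self-contained sketch of that pattern; `getMapKeyAsBool` is a hypothetical stand-in for the real `GetMapKeyAsBool` helper (whose actual signature also takes a context string for error messages):

```go
package main

import (
	"fmt"
	"strconv"
)

// getMapKeyAsBool is a stand-in for the controller's GetMapKeyAsBool helper:
// it reports whether the key exists and whether its value parses as a bool.
func getMapKeyAsBool(m map[string]string, key string) (bool, bool, error) {
	v, ok := m[key]
	if !ok {
		return false, false, nil
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		return false, true, fmt.Errorf("annotation %q must be a boolean, got %q", key, v)
	}
	return b, true, nil
}

func main() {
	annotations := map[string]string{"nginx.org/limit-req-scale": "true"}
	if scale, exists, err := getMapKeyAsBool(annotations, "nginx.org/limit-req-scale"); exists && err == nil {
		fmt.Println("LimitReqScale =", scale) // prints: LimitReqScale = true
	}
}
```

Because the `exists`-first check mirrors `parseRateLimitAnnotations`, a malformed value is collected as an error while `LimitReqScale` keeps its zero value of `false`.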
diff --git a/internal/configs/configurator.go b/internal/configs/configurator.go index 9828c04a61..4b84efb11f 100644 --- a/internal/configs/configurator.go +++ b/internal/configs/configurator.go @@ -136,6 +136,7 @@ type Configurator struct { isLatencyMetricsEnabled bool isReloadsEnabled bool isDynamicSSLReloadEnabled bool + ingressControllerReplicas int } // ConfiguratorParams is a collection of parameters used for the @@ -391,15 +392,16 @@ func (cnf *Configurator) addOrUpdateIngress(ingEx *IngressEx) (bool, Warnings, e isMinion := false nginxCfg, warnings := generateNginxCfg(NginxCfgParams{ - staticParams: cnf.staticCfgParams, - ingEx: ingEx, - apResources: apResources, - dosResource: dosResource, - isMinion: isMinion, - isPlus: cnf.isPlus, - baseCfgParams: cnf.cfgParams, - isResolverConfigured: cnf.IsResolverConfigured(), - isWildcardEnabled: cnf.isWildcardEnabled, + staticParams: cnf.staticCfgParams, + ingEx: ingEx, + apResources: apResources, + dosResource: dosResource, + isMinion: isMinion, + isPlus: cnf.isPlus, + baseCfgParams: cnf.cfgParams, + isResolverConfigured: cnf.IsResolverConfigured(), + isWildcardEnabled: cnf.isWildcardEnabled, + ingressControllerReplicas: cnf.ingressControllerReplicas, }) name := objectMetaToFileName(&ingEx.Ingress.ObjectMeta) @@ -454,14 +456,15 @@ func (cnf *Configurator) addOrUpdateMergeableIngress(mergeableIngs *MergeableIng } nginxCfg, warnings := generateNginxCfgForMergeableIngresses(NginxCfgParams{ - mergeableIngs: mergeableIngs, - apResources: apResources, - dosResource: dosResource, - baseCfgParams: cnf.cfgParams, - isPlus: cnf.isPlus, - isResolverConfigured: cnf.IsResolverConfigured(), - staticParams: cnf.staticCfgParams, - isWildcardEnabled: cnf.isWildcardEnabled, + mergeableIngs: mergeableIngs, + apResources: apResources, + dosResource: dosResource, + baseCfgParams: cnf.cfgParams, + isPlus: cnf.isPlus, + isResolverConfigured: cnf.IsResolverConfigured(), + staticParams: cnf.staticCfgParams, + isWildcardEnabled: cnf.isWildcardEnabled, + ingressControllerReplicas: cnf.ingressControllerReplicas, }) name := objectMetaToFileName(&mergeableIngs.Master.Ingress.ObjectMeta) @@ -607,6 +610,7 @@ func (cnf *Configurator) addOrUpdateVirtualServer(virtualServerEx *VirtualServer name := getFileNameForVirtualServer(virtualServerEx.VirtualServer) vsc := newVirtualServerConfigurator(cnf.cfgParams, cnf.isPlus, cnf.IsResolverConfigured(), cnf.staticCfgParams, cnf.isWildcardEnabled, nil) + vsc.IngressControllerReplicas = cnf.ingressControllerReplicas vsCfg, warnings := vsc.GenerateVirtualServerConfig(virtualServerEx, apResources, dosResources) content, err := cnf.templateExecutorV2.ExecuteVirtualServerTemplate(&vsCfg) if err != nil { @@ -2009,3 +2013,14 @@ func (cnf *Configurator) DynamicSSLReloadEnabled() bool { func (cnf *Configurator) UpsertSplitClientsKeyVal(zoneName, key, value string) { cnf.nginxManager.UpsertSplitClientsKeyVal(zoneName, key, value) } + +// GetIngressControllerReplicas returns the number of ingress controller replicas previously stored via SetIngressControllerReplicas. +func (cnf *Configurator) GetIngressControllerReplicas() int { + return cnf.ingressControllerReplicas +} + +// SetIngressControllerReplicas sets the number of ingress controller replicas. +// It is used when calculating scaled rate limits. +func (cnf *Configurator) SetIngressControllerReplicas(replicas int) { + cnf.ingressControllerReplicas = replicas +} diff --git a/internal/configs/ingress.go b/internal/configs/ingress.go index 549f0888d5..4b41c32a7b 100644 --- a/internal/configs/ingress.go
+++ b/internal/configs/ingress.go @@ -78,16 +78,17 @@ type MergeableIngresses struct { // NginxCfgParams is a collection of parameters // used by generateNginxCfg() and generateNginxCfgForMergeableIngresses() type NginxCfgParams struct { - staticParams *StaticConfigParams - ingEx *IngressEx - mergeableIngs *MergeableIngresses - apResources *AppProtectResources - dosResource *appProtectDosResource - baseCfgParams *ConfigParams - isMinion bool - isPlus bool - isResolverConfigured bool - isWildcardEnabled bool + staticParams *StaticConfigParams + ingEx *IngressEx + mergeableIngs *MergeableIngresses + apResources *AppProtectResources + dosResource *appProtectDosResource + baseCfgParams *ConfigParams + isMinion bool + isPlus bool + isResolverConfigured bool + isWildcardEnabled bool + ingressControllerReplicas int } //nolint:gocyclo @@ -278,11 +279,15 @@ func generateNginxCfg(p NginxCfgParams) (version1.IngressNginxConfig, Warnings) RejectCode: cfgParams.LimitReqRejectCode, } if !limitReqZoneExists(limitReqZones, zoneName) { + rate := cfgParams.LimitReqRate + if cfgParams.LimitReqScale && p.ingressControllerReplicas > 0 { + rate = scaleRatelimit(rate, p.ingressControllerReplicas) + } limitReqZones = append(limitReqZones, version1.LimitReqZone{ Name: zoneName, Key: cfgParams.LimitReqKey, Size: cfgParams.LimitReqZoneSize, - Rate: cfgParams.LimitReqRate, + Rate: rate, }) } } @@ -754,3 +759,30 @@ func GetBackendPortAsString(port networking.ServiceBackendPort) string { } return strconv.Itoa(int(port.Number)) } + +// scaleRatelimit divides a given ratelimit by the given number of replicas, adjusting the unit and flooring the result as needed +func scaleRatelimit(ratelimit string, replicas int) string { + if replicas < 1 { + return ratelimit + } + + match := rateRegexp.FindStringSubmatch(ratelimit) + if match == nil { + return ratelimit + } + + number, err := strconv.Atoi(match[1]) + if err != nil { + return ratelimit + } + + numberf := float64(number) / float64(replicas) + + unit := match[2] + if unit == "r/s" && numberf < 1 { + numberf = numberf * 60 + unit = "r/m" + } + + return strconv.Itoa(int(numberf)) + unit +} diff --git a/internal/configs/ingress_test.go b/internal/configs/ingress_test.go index 2810fbaac9..8c7d1ee4fd 100644 --- a/internal/configs/ingress_test.go +++ b/internal/configs/ingress_test.go @@ -1244,6 +1244,66 @@ func TestGenerateNginxCfgForMergeableIngressesForLimitReq(t *testing.T) { } } +func TestGenerateNginxCfgForLimitReqWithScaling(t *testing.T) { + t.Parallel() + cafeIngressEx := createCafeIngressEx() + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-rate"] = "200r/s" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-key"] = "${request_uri}" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-burst"] = "100" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-no-delay"] = "true" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-delay"] = "80" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-reject-code"] = "503" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-dry-run"] = "true" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-log-level"] = "info" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-zone-size"] = "11m" + cafeIngressEx.Ingress.Annotations["nginx.org/limit-req-scale"] = "true" + + isPlus := false + configParams := NewDefaultConfigParams(isPlus) + + expectedZones := []version1.LimitReqZone{ + { + Name: "default/cafe-ingress", + Key: "${request_uri}", + Size: "11m", + Rate: "50r/s", + }, + } + + expectedReqs := &version1.LimitReq{ + Zone: "default/cafe-ingress", + Burst: 100, + Delay: 80, + NoDelay: true, + DryRun: true, + LogLevel: "info", + RejectCode: 503, + } + + result, warnings := generateNginxCfg(NginxCfgParams{ + ingEx: &cafeIngressEx, + baseCfgParams: configParams, + staticParams: &StaticConfigParams{}, + isPlus: isPlus, + ingressControllerReplicas: 4, + }) + + if !reflect.DeepEqual(result.LimitReqZones, expectedZones) { + t.Errorf("generateNginxCfg returned \n%v, but expected \n%v", result.LimitReqZones, expectedZones) + } + + for _, server := range result.Servers { + for _, location := range server.Locations { + if !reflect.DeepEqual(location.LimitReq, expectedReqs) { + t.Errorf("generateNginxCfg returned \n%v, but expected \n%v", location.LimitReq, expectedReqs) + } + } + } + if len(warnings) != 0 { + t.Errorf("generateNginxCfg returned warnings: %v", warnings) + } +} + func createMergeableCafeIngress() *MergeableIngresses { master := networking.Ingress{ ObjectMeta: meta_v1.ObjectMeta{ @@ -2379,3 +2439,59 @@ func TestGetBackendPortAsString(t *testing.T) { } } } + +func TestScaleRatelimit(t *testing.T) { + tests := []struct { + input string + pods int + expected string + }{ + { + input: "10r/s", + pods: 0, + expected: "10r/s", + }, + { + input: "10r/s", + pods: 1, + expected: "10r/s", + }, + { + input: "10r/s", + pods: 2, + expected: "5r/s", + }, + { + input: "10r/s", + pods: 3, + expected: "3r/s", + }, + { + input: "10r/s", + pods: 10, + expected: "1r/s", + }, + { + input: "10r/s", + pods: 20, + expected: "30r/m", + }, + { + input: "10r/m", + pods: 0, + expected: "10r/m", + }, + { + input: "10r/m", + pods: 1, + expected: "10r/m", + }, + } + + for _, testcase := range tests { + scaled := scaleRatelimit(testcase.input, testcase.pods) + if scaled != testcase.expected { + t.Errorf("scaleRatelimit(%s,%d) returned %s but expected %s", testcase.input, testcase.pods, scaled, testcase.expected) + } + } +} diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go index c715e3410b..aa6c12fcdf 100644 --- a/internal/configs/virtualserver.go +++ b/internal/configs/virtualserver.go @@ -283,6 +283,7 @@ type virtualServerConfigurator struct { StaticSSLPath string DynamicWeightChangesReload bool bundleValidator bundleValidator + IngressControllerReplicas int } type oidcPolicyCfg struct { @@ -935,11 +936,12 @@ func (p *policiesCfg) addRateLimitConfig( polName string, vsNamespace string, vsName string, + podReplicas int, ) *validationResults { res := newValidationResults() rlZoneName := fmt.Sprintf("pol_rl_%v_%v_%v_%v", polNamespace, polName, vsNamespace, vsName) p.LimitReqs = append(p.LimitReqs, generateLimitReq(rlZoneName, rateLimit)) - p.LimitReqZones = append(p.LimitReqZones, generateLimitReqZone(rlZoneName, rateLimit)) + p.LimitReqZones = append(p.LimitReqZones, generateLimitReqZone(rlZoneName, rateLimit, podReplicas)) if len(p.LimitReqs) == 1 { p.LimitReqOptions = generateLimitReqOptions(rateLimit) } else { @@ -1400,6 +1402,7 @@ func (vsc *virtualServerConfigurator) generatePolicies( p.Name, ownerDetails.vsNamespace, ownerDetails.vsName, + vsc.IngressControllerReplicas, ) case pol.Spec.JWTAuth != nil: res = config.addJWTAuthConfig(pol.Spec.JWTAuth, key, polNamespace, policyOpts.secretRefs) @@ -1460,12 +1463,16 @@ func generateLimitReq(zoneName string, rateLimitPol *conf_v1.RateLimit)
version2 return limitReq } -func generateLimitReqZone(zoneName string, rateLimitPol *conf_v1.RateLimit) version2.LimitReqZone { +func generateLimitReqZone(zoneName string, rateLimitPol *conf_v1.RateLimit, podReplicas int) version2.LimitReqZone { + rate := rateLimitPol.Rate + if rateLimitPol.Scale { + rate = scaleRatelimit(rateLimitPol.Rate, podReplicas) + } return version2.LimitReqZone{ ZoneName: zoneName, Key: rateLimitPol.Key, ZoneSize: rateLimitPol.ZoneSize, - Rate: rateLimitPol.Rate, + Rate: rate, } } diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go index 640a86f831..a7f89270db 100644 --- a/internal/configs/virtualserver_test.go +++ b/internal/configs/virtualserver_test.go @@ -5910,6 +5910,47 @@ func TestGeneratePolicies(t *testing.T) { }, msg: "multi rate limit reference", }, + { + policyRefs: []conf_v1.PolicyReference{ + { + Name: "rateLimitScale-policy", + Namespace: "default", + }, + }, + policies: map[string]*conf_v1.Policy{ + "default/rateLimitScale-policy": { + Spec: conf_v1.PolicySpec{ + RateLimit: &conf_v1.RateLimit{ + Key: "test", + ZoneSize: "10M", + Rate: "10r/s", + LogLevel: "notice", + Scale: true, + }, + }, + }, + }, + expected: policiesCfg{ + LimitReqZones: []version2.LimitReqZone{ + { + Key: "test", + ZoneSize: "10M", + Rate: "5r/s", + ZoneName: "pol_rl_default_rateLimitScale-policy_default_test", + }, + }, + LimitReqOptions: version2.LimitReqOptions{ + LogLevel: "notice", + RejectCode: 503, + }, + LimitReqs: []version2.LimitReq{ + { + ZoneName: "pol_rl_default_rateLimitScale-policy_default_test", + }, + }, + }, + msg: "rate limit reference with scale", + }, { policyRefs: []conf_v1.PolicyReference{ { @@ -6282,6 +6323,8 @@ func TestGeneratePolicies(t *testing.T) { } vsc := newVirtualServerConfigurator(&ConfigParams{}, false, false, &StaticConfigParams{}, false, &fakeBV) + // required to test the scaling of the ratelimit + vsc.IngressControllerReplicas = 2 for _, test := range tests { result := vsc.generatePolicies(ownerDetails, test.policyRefs, test.policies, test.context, policyOpts) diff --git a/internal/k8s/configuration.go b/internal/k8s/configuration.go index f7c2890659..f4d9bb05a3 100644 --- a/internal/k8s/configuration.go +++ b/internal/k8s/configuration.go @@ -917,6 +917,11 @@ func (c *Configuration) FindResourcesForAppProtectDosProtected(namespace string, return c.findResourcesForResourceReference(namespace, name, c.appDosProtectedChecker) } +// FindIngressesWithRatelimitScaling finds ingresses that use rate limit scaling +func (c *Configuration) FindIngressesWithRatelimitScaling(svcNamespace string) []Resource { + return c.findResourcesForResourceReference(svcNamespace, "", &ratelimitScalingAnnotationChecker{}) +} + func (c *Configuration) findResourcesForResourceReference(namespace string, name string, checker resourceReferenceChecker) []Resource { c.lock.RLock() defer c.lock.RUnlock() diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index c6e2ab664a..a4a5f4d499 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -853,6 +853,11 @@ func (lbc *LoadBalancerController) syncEndpointSlices(task task) bool { svcName := endpointSlice.Labels["kubernetes.io/service-name"] svcResource := lbc.configuration.FindResourcesForService(endpointSlice.Namespace, svcName) + // check if this is the endpointslice for the controller's own service + if lbc.statusUpdater.namespace == endpointSlice.Namespace && lbc.statusUpdater.externalServiceName == svcName { + return 
lbc.updateNumberOfIngressControllerReplicas(*endpointSlice) + } + resourceExes := lbc.createExtendedResources(svcResource) if len(resourceExes.IngressExes) > 0 { @@ -910,6 +915,60 @@ func (lbc *LoadBalancerController) syncEndpointSlices(task task) bool { return resourcesFound } +// updateNumberOfIngressControllerReplicas finds the number of ready endpoints for the service pointing at the ingress controller and updates all configs that depend on that number +func (lbc *LoadBalancerController) updateNumberOfIngressControllerReplicas(controllerEndpointSlice discovery_v1.EndpointSlice) bool { + previous := lbc.configurator.GetIngressControllerReplicas() + current := countReadyEndpoints(controllerEndpointSlice) + found := false + + if current != previous { + // number of ready endpoints changed. Update configuration of all ingresses that depend on it + lbc.configurator.SetIngressControllerReplicas(current) + + // handle ingresses + resources := lbc.configuration.FindIngressesWithRatelimitScaling(controllerEndpointSlice.Namespace) + resourceExes := lbc.createExtendedResources(resources) + for _, ingress := range resourceExes.IngressExes { + found = true + _, err := lbc.configurator.AddOrUpdateIngress(ingress) + if err != nil { + glog.Errorf("Error updating ratelimit for Ingress %s/%s: %s", ingress.Ingress.Namespace, ingress.Ingress.Name, err) + } + } + for _, ingress := range resourceExes.MergeableIngresses { + found = true + _, err := lbc.configurator.AddOrUpdateMergeableIngress(ingress) + if err != nil { + glog.Errorf("Error updating ratelimit for Ingress %s/%s: %s", ingress.Master.Ingress.Namespace, ingress.Master.Ingress.Name, err) + } + } + + // handle virtualservers + resources = lbc.findVirtualServersUsingRatelimitScaling() + resourceExes = lbc.createExtendedResources(resources) + for _, vserver := range resourceExes.VirtualServerExes { + found = true + _, err := lbc.configurator.AddOrUpdateVirtualServer(vserver) + if err != nil { + glog.Errorf("Error updating ratelimit for VirtualServer %s/%s: %s", vserver.VirtualServer.Namespace, vserver.VirtualServer.Name, err) + } + } + } + return found +} + +func (lbc *LoadBalancerController) findVirtualServersUsingRatelimitScaling() []Resource { + policies := lbc.getAllPolicies() + resources := make([]Resource, 0, len(policies)) + for _, policy := range policies { + if policy.Spec.RateLimit != nil && policy.Spec.RateLimit.Scale { + newresources := lbc.configuration.FindResourcesForPolicy(policy.Namespace, policy.Name) + resources = append(resources, newresources...)
+ } + } + return resources +} + func (lbc *LoadBalancerController) virtualServerRequiresEndpointsUpdate(vsEx *configs.VirtualServerEx, serviceName string) bool { for _, upstream := range vsEx.VirtualServer.Spec.Upstreams { if upstream.Service == serviceName && !upstream.UseClusterIP { @@ -967,6 +1026,17 @@ func (lbc *LoadBalancerController) mergeableIngressRequiresEndpointsUpdate(merge return lbc.ingressRequiresEndpointsUpdate(masterIngress, serviceName) } +// countReadyEndpoints returns the number of ready endpoints in this endpointslice +func countReadyEndpoints(slice discovery_v1.EndpointSlice) int { + count := 0 + for _, endpoint := range slice.Endpoints { + if endpoint.Conditions.Ready != nil && *endpoint.Conditions.Ready { + count = count + 1 + } + } + return count +} + func (lbc *LoadBalancerController) createExtendedResources(resources []Resource) configs.ExtendedResources { var result configs.ExtendedResources diff --git a/internal/k8s/reference_checkers.go b/internal/k8s/reference_checkers.go index 6cf279fcd8..c4241d032f 100644 --- a/internal/k8s/reference_checkers.go +++ b/internal/k8s/reference_checkers.go @@ -325,3 +325,31 @@ func (rc *dosResourceReferenceChecker) IsReferencedByVirtualServerRoute(namespac func (rc *dosResourceReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1.TransportServer) bool { return false } + +type ratelimitScalingAnnotationChecker struct{} + +func (rc *ratelimitScalingAnnotationChecker) IsReferencedByIngress(_ string, _ string, ing *networking.Ingress) bool { + for key, value := range ing.Annotations { + if key == "nginx.org/limit-req-scale" && value == "true" { + return true + } + } + + return false +} + +func (rc *ratelimitScalingAnnotationChecker) IsReferencedByMinion(svcNamespace string, svcName string, ing *networking.Ingress) bool { + return rc.IsReferencedByIngress(svcNamespace, svcName, ing) +} + +func (rc *ratelimitScalingAnnotationChecker) IsReferencedByVirtualServer(_ string, _ string, _ *conf_v1.VirtualServer) bool { + return false +} + +func (rc *ratelimitScalingAnnotationChecker) IsReferencedByVirtualServerRoute(_ string, _ string, _ *conf_v1.VirtualServerRoute) bool { + return false +} + +func (rc *ratelimitScalingAnnotationChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1.TransportServer) bool { + return false +} diff --git a/internal/k8s/reference_checkers_test.go b/internal/k8s/reference_checkers_test.go index 5325f9eedd..0556f9c815 100644 --- a/internal/k8s/reference_checkers_test.go +++ b/internal/k8s/reference_checkers_test.go @@ -1638,3 +1638,49 @@ func TestDosProtectedIsReferencedByVirtualServer(t *testing.T) { } } } + +func TestReplicaReferenceChecker(t *testing.T) { + tests := []struct { + Ingress *networking.Ingress + Expected bool + }{ + { + Ingress: &networking.Ingress{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + "foo": "bar", + }, + }, + }, + Expected: false, + }, + { + Ingress: &networking.Ingress{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + "nginx.org/limit-req-scale": "false", + }, + }, + }, + Expected: false, + }, + { + Ingress: &networking.Ingress{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + "nginx.org/limit-req-scale": "true", + }, + }, + }, + Expected: true, + }, + } + + var checker ratelimitScalingAnnotationChecker + for i, testcase := range tests { + result := checker.IsReferencedByIngress("foo", "bar", testcase.Ingress) + if result != testcase.Expected { + t.Errorf("replicaReferenceChecker did not work 
for case %d", i) + } + } +} diff --git a/pkg/apis/configuration/v1/types.go b/pkg/apis/configuration/v1/types.go index 067391cd65..58558a4d95 100644 --- a/pkg/apis/configuration/v1/types.go +++ b/pkg/apis/configuration/v1/types.go @@ -606,6 +606,7 @@ type RateLimit struct { DryRun *bool `json:"dryRun"` LogLevel string `json:"logLevel"` RejectCode *int `json:"rejectCode"` + Scale bool `json:"scale"` } // JWTAuth holds JWT authentication configuration.
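End to end, the scaling path in this diff is: `syncEndpointSlices` spots the controller's own EndpointSlice, `countReadyEndpoints` counts the ready endpoints, the count is stored on the `Configurator`, and every scale-enabled rate is divided by it. Below is a standalone sketch of that division, mirroring `scaleRatelimit` and its table test above; `rateRe` is an assumed stand-in for the package's `rateRegexp`, which this diff does not show:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// rateRe matches an integer rate followed by the unit "r/s" or "r/m",
// e.g. "200r/s"; assumed equivalent of the package-level rateRegexp.
var rateRe = regexp.MustCompile(`^(\d+)(r/[sm])$`)

// scale divides a rate by the replica count, flooring the result and
// downshifting from r/s to r/m when the per-replica rate drops below 1r/s.
func scale(rate string, replicas int) string {
	if replicas < 1 {
		return rate
	}
	m := rateRe.FindStringSubmatch(rate)
	if m == nil {
		return rate
	}
	n, _ := strconv.Atoi(m[1])
	perReplica := float64(n) / float64(replicas)
	unit := m[2]
	if unit == "r/s" && perReplica < 1 {
		perReplica *= 60
		unit = "r/m"
	}
	return strconv.Itoa(int(perReplica)) + unit
}

func main() {
	fmt.Println(scale("200r/s", 4)) // 50r/s, as in TestGenerateNginxCfgForLimitReqWithScaling
	fmt.Println(scale("10r/s", 3))  // 3r/s, floored
	fmt.Println(scale("10r/s", 20)) // 30r/m, the unit downshift keeps sub-1r/s rates expressible
}
```

The downshift matters because NGINX's `limit_req_zone` rate takes whole numbers only; converting to `r/m` keeps a low per-replica limit expressible instead of flooring it to zero.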