diff --git a/CHANGELOG.md b/CHANGELOG.md index 650100fa17..606587c9f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -311,7 +311,7 @@ BUGFIXES: HELM CHART: * The version of the helm chart is now 0.6.0. * Add new parameters to the Chart: `controller.appprotect.enable`, `controller.globalConfiguration.create`, `controller.globalConfiguration.spec`, `controller.readyStatus.enable`, `controller.readyStatus.port`, `controller.config.annotations`, `controller.reportIngressStatus.annotations`. Added in [1035](https://github.com/nginxinc/kubernetes-ingress/pull/1035), [1034](https://github.com/nginxinc/kubernetes-ingress/pull/1034), [1029](https://github.com/nginxinc/kubernetes-ingress/pull/1029), [1003](https://github.com/nginxinc/kubernetes-ingress/pull/1003) thanks to [RubyLangdon](https://github.com/RubyLangdon). -* [1047](https://github.com/nginxinc/kubernetes-ingress/pull/1047) and [1009](https://github.com/nginxinc/kubernetes-ingress/pull/1009): Change how Helm manages the custom resource defintions (CRDs) to support installing multiple Ingress Controller releases. **Note**: If you're using the custom resources (`controller.enableCustomResources` is set to `true`), this is a breaking change. See the HELM UPGRADE section below for the upgrade instructions. +* [1047](https://github.com/nginxinc/kubernetes-ingress/pull/1047) and [1009](https://github.com/nginxinc/kubernetes-ingress/pull/1009): Change how Helm manages the custom resource definitions (CRDs) to support installing multiple Ingress Controller releases. **Note**: If you're using the custom resources (`controller.enableCustomResources` is set to `true`), this is a breaking change. See the HELM UPGRADE section below for the upgrade instructions. CHANGES: * Update NGINX version to 1.19.1. diff --git a/Makefile b/Makefile index ffc8a38e8c..ac32b2df63 100644 --- a/Makefile +++ b/Makefile @@ -123,7 +123,7 @@ clean: ## Remove nginx-ingress binary -rm -r dist .PHONY: deps -deps: ## Add missing and remove unused modules, verify deps and dowload them to local cache +deps: ## Add missing and remove unused modules, verify deps and download them to local cache @go mod tidy && go mod verify && go mod download .PHONY: clean-cache diff --git a/build/Dockerfile b/build/Dockerfile index 9b39513a7c..74289ef82b 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -237,7 +237,7 @@ LABEL org.opencontainers.image.revision="${GIT_COMMIT}" LABEL org.opencontainers.image.created="${DATE}" LABEL org.nginx.kic.image.build.target="${TARGETPLATFORM}" LABEL org.nginx.kic.image.build.os="${BUILD_OS}" -LABEL org.nginx.kic.image.build.nginx.vesion="${NGINX_PLUS_VERSION}${NGINX_VERSION}" +LABEL org.nginx.kic.image.build.nginx.version="${NGINX_PLUS_VERSION}${NGINX_VERSION}" ############################################# Build nginx-ingress in golang container ############################################# diff --git a/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md b/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md index 0ced264196..fce3532b11 100644 --- a/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md +++ b/docs/content/configuration/ingress-resources/advanced-configuration-with-annotations.md @@ -164,24 +164,24 @@ The table below summarizes the available annotations. 
### Backend Services (Upstreams) -{{% table %}} -|Annotation | ConfigMap Key | Description | Default | Example | -| ---| ---| ---| ---| --- | -|``nginx.org/lb-method`` | ``lb-method`` | Sets the [load balancing method](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/#choosing-a-load-balancing-method). To use the round-robin method, specify ``"round_robin"``. | ``"random two least_conn"`` | | -|``nginx.org/ssl-services`` | N/A | Enables HTTPS or gRPC over SSL when connecting to the endpoints of services. | N/A | [SSL Services Support](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/ssl-services). | -|``nginx.org/grpc-services`` | N/A | Enables gRPC for services. Note: requires HTTP/2 (see ``http2`` ConfigMap key); only works for Ingresses with TLS termination enabled. | N/A | [GRPC Services Support](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/grpc-services). | -|``nginx.org/websocket-services`` | N/A | Enables WebSocket for services. | N/A | [WebSocket support](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/websocket). | -|``nginx.org/max-fails`` | ``max-fails`` | Sets the value of the [max_fails](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_fails) parameter of the ``server`` directive. | ``1`` | | -|``nginx.org/max-conns`` | N\A | Sets the value of the [max_conns](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_conns) parameter of the ``server`` directive. | ``0`` | | -|``nginx.org/upstream-zone-size`` | ``upstream-zone-size`` | Sets the size of the shared memory [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) for upstreams. For NGINX, the special value 0 disables the shared memory zones. For NGINX Plus, shared memory zones are required and cannot be disabled. The special value 0 will be ignored. | ``256K`` | | -|``nginx.org/fail-timeout`` | ``fail-timeout`` | Sets the value of the [fail_timeout](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#fail_timeout) parameter of the ``server`` directive. | ``10s`` | | -|``nginx.com/sticky-cookie-services`` | N/A | Configures session persistence. | N/A | [Session Persistence](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/session-persistence). | -|``nginx.org/keepalive`` | ``keepalive`` | Sets the value of the [keepalive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) directive. Note that ``proxy_set_header Connection "";`` is added to the generated configuration when the value > 0. | ``0`` | | -|``nginx.com/health-checks`` | N/A | Enables active health checks. | ``False`` | [Support for Active Health Checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). | -|``nginx.com/health-checks-mandatory`` | N/A | Configures active health checks as mandatory. | ``False`` | [Support for Active Health Checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). | -|``nginx.com/health-checks-mandatory-queue`` | N/A | When active health checks are mandatory, configures a queue for temporary storing incoming requests during the time when NGINX Plus is checking the health of the endpoints after a configuration reload. | ``0`` | [Support for Active Health Checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). 
| -|``nginx.com/slow-start`` | N/A | Sets the upstream server [slow-start period](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/#server-slow-start). By default, slow-start is activated after a server becomes [available](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/#passive-health-checks) or [healthy](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/#active-health-checks). To enable slow-start for newly added servers, configure [mandatory active health checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). | ``"0s"`` | | -{{% /table %}} +{{% table %}} +|Annotation | ConfigMap Key | Description | Default | Example | +| ---| ---| ---| ---| --- | +|``nginx.org/lb-method`` | ``lb-method`` | Sets the [load balancing method](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/#choosing-a-load-balancing-method). To use the round-robin method, specify ``"round_robin"``. | ``"random two least_conn"`` | | +|``nginx.org/ssl-services`` | N/A | Enables HTTPS or gRPC over SSL when connecting to the endpoints of services. | N/A | [SSL Services Support](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/ssl-services). | +|``nginx.org/grpc-services`` | N/A | Enables gRPC for services. Note: requires HTTP/2 (see ``http2`` ConfigMap key); only works for Ingresses with TLS termination enabled. | N/A | [GRPC Services Support](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/grpc-services). | +|``nginx.org/websocket-services`` | N/A | Enables WebSocket for services. | N/A | [WebSocket support](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/websocket). | +|``nginx.org/max-fails`` | ``max-fails`` | Sets the value of the [max_fails](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_fails) parameter of the ``server`` directive. | ``1`` | | +|``nginx.org/max-conns`` | N\A | Sets the value of the [max_conns](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_conns) parameter of the ``server`` directive. | ``0`` | | +|``nginx.org/upstream-zone-size`` | ``upstream-zone-size`` | Sets the size of the shared memory [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) for upstreams. For NGINX, the special value 0 disables the shared memory zones. For NGINX Plus, shared memory zones are required and cannot be disabled. The special value 0 will be ignored. | ``256K`` | | +|``nginx.org/fail-timeout`` | ``fail-timeout`` | Sets the value of the [fail_timeout](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#fail_timeout) parameter of the ``server`` directive. | ``10s`` | | +|``nginx.com/sticky-cookie-services`` | N/A | Configures session persistence. | N/A | [Session Persistence](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/session-persistence). | +|``nginx.org/keepalive`` | ``keepalive`` | Sets the value of the [keepalive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) directive. Note that ``proxy_set_header Connection "";`` is added to the generated configuration when the value > 0. | ``0`` | | +|``nginx.com/health-checks`` | N/A | Enables active health checks. | ``False`` | [Support for Active Health Checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). 
| +|``nginx.com/health-checks-mandatory`` | N/A | Configures active health checks as mandatory. | ``False`` | [Support for Active Health Checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). | +|``nginx.com/health-checks-mandatory-queue`` | N/A | When active health checks are mandatory, configures a queue for temporary storing incoming requests during the time when NGINX Plus is checking the health of the endpoints after a configuration reload. | ``0`` | [Support for Active Health Checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). | +|``nginx.com/slow-start`` | N/A | Sets the upstream server [slow-start period](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/#server-slow-start). By default, slow-start is activated after a server becomes [available](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/#passive-health-checks) or [healthy](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/#active-health-checks). To enable slow-start for newly added servers, configure [mandatory active health checks](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/health-checks). | ``"0s"`` | | +{{% /table %}} ### Snippets and Custom Templates @@ -196,12 +196,12 @@ The table below summarizes the available annotations. **Note**: The App Protect annotations only work if App Protect module is [installed](/nginx-ingress-controller/app-protect/installation/). -{{% table %}} -|Annotation | ConfigMap Key | Description | Default | Example | -| ---| ---| ---| ---| --- | -|``appprotect.f5.com/app-protect-policy`` | N/A | The name of the App Protect Policy for the Ingress Resource. Format is ``namespace/name``. If no namespace is specified, the same namespace of the Ingress Resource is used. If not specified but ``appprotect.f5.com/app-protect-enable`` is true, a default policy id applied. If the referenced policy resource does not exist, or policy is invalid, this annotation will be ignored, and the default policy will be applied. | N/A | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | -|``appprotect.f5.com/app-protect-enable`` | N/A | Enable App Protect for the Ingress Resource. | ``False`` | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | -|``appprotect.f5.com/app-protect-security-log-enable`` | N/A | Enable the [security log](/nginx-app-protect/troubleshooting/#app-protect-logging-overview) for App Protect. | ``False`` | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | -|``appprotect.f5.com/app-protect-security-log`` | N/A | The App Protect log configuration for the Ingress Resource. Format is ``namespace/name``. If no namespace is specified, the same namespace as the Ingress Resource is used. If not specified the default is used which is: filter: ``illegal``, format: ``default``. Multiple configurations can be specified in a comma seperated list. Both log configurations and destinations list (see below) must be of equal length. Configs and destinations are paired by the list indices. | N/A | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | -|``appprotect.f5.com/app-protect-security-log-destination`` | N/A | The destination of the security log. 
For more information check the [DESTINATION argument](/nginx-app-protect/troubleshooting/#app-protect-logging-overview). Multiple destinations can be specified in a coma separated list. Both log configurations and destinations list (see above) must be of equal length. Configs and destinations are paired by the list indices. | ``syslog:server=localhost:514`` | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | -{{% /table %}} +{{% table %}} +|Annotation | ConfigMap Key | Description | Default | Example | +| ---| ---| ---| ---| --- | +|``appprotect.f5.com/app-protect-policy`` | N/A | The name of the App Protect Policy for the Ingress Resource. Format is ``namespace/name``. If no namespace is specified, the same namespace of the Ingress Resource is used. If not specified but ``appprotect.f5.com/app-protect-enable`` is true, a default policy is applied. If the referenced policy resource does not exist, or policy is invalid, this annotation will be ignored, and the default policy will be applied. | N/A | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | +|``appprotect.f5.com/app-protect-enable`` | N/A | Enable App Protect for the Ingress Resource. | ``False`` | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | +|``appprotect.f5.com/app-protect-security-log-enable`` | N/A | Enable the [security log](/nginx-app-protect/troubleshooting/#app-protect-logging-overview) for App Protect. | ``False`` | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | +|``appprotect.f5.com/app-protect-security-log`` | N/A | The App Protect log configuration for the Ingress Resource. Format is ``namespace/name``. If no namespace is specified, the same namespace as the Ingress Resource is used. If not specified the default is used which is: filter: ``illegal``, format: ``default``. Multiple configurations can be specified in a comma separated list. Both log configurations and destinations list (see below) must be of equal length. Configs and destinations are paired by the list indices. | N/A | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | +|``appprotect.f5.com/app-protect-security-log-destination`` | N/A | The destination of the security log. For more information check the [DESTINATION argument](/nginx-app-protect/troubleshooting/#app-protect-logging-overview). Multiple destinations can be specified in a comma separated list. Both log configurations and destinations list (see above) must be of equal length. Configs and destinations are paired by the list indices. | ``syslog:server=localhost:514`` | [Example for App Protect](https://github.com/nginxinc/kubernetes-ingress/tree/v2.0.3/examples/appprotect). | +{{% /table %}} diff --git a/docs/content/configuration/transportserver-resource.md b/docs/content/configuration/transportserver-resource.md index afca71cae7..a3e201279c 100644 --- a/docs/content/configuration/transportserver-resource.md +++ b/docs/content/configuration/transportserver-resource.md @@ -226,7 +226,7 @@ sessionParameters: {{% table %}} |Field | Description | Type | Required | | ---| ---| ---| --- | -|``timeout`` | The timeout between two succesive read or write operations on client or proxied server connections.
See [proxy_timeout](http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout) directive. The default is ``10m``. | ``string`` | No | +|``timeout`` | The timeout between two successive read or write operations on client or proxied server connections. See [proxy_timeout](http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout) directive. The default is ``10m``. | ``string`` | No | {{% /table %}} ### Action diff --git a/docs/content/releases.md b/docs/content/releases.md index 35c1706158..30d9eb00be 100644 --- a/docs/content/releases.md +++ b/docs/content/releases.md @@ -361,7 +361,7 @@ UPGRADE: * For NGINX Plus, please build your own image using the 1.11.0 source code. * For Helm, use version 0.9.0 of the chart. * [1241](https://github.com/nginxinc/kubernetes-ingress/pull/1241) improved the Makefile. As a result, the commands for building the Ingress Controller image were changed. See the updated commands [here](https://docs.nginx.com/nginx-ingress-controller/installation/building-ingress-controller-image/#building-the-image-and-pushing-it-to-the-private-registry). -* [1241](https://github.com/nginxinc/kubernetes-ingress/pull/1241) also consolidated all Dockerfiles into a singe Dockerfile. If you customized any of the Dockerfiles, make sure to port the changes to the new Dockerfile. +* [1241](https://github.com/nginxinc/kubernetes-ingress/pull/1241) also consolidated all Dockerfiles into a single Dockerfile. If you customized any of the Dockerfiles, make sure to port the changes to the new Dockerfile. * [1288](https://github.com/nginxinc/kubernetes-ingress/pull/1288) further improved validation of Ingress annotations. See this [document](https://docs.nginx.com/nginx-ingress-controller/configuration/ingress-resources/advanced-configuration-with-annotations/#validation) to learn more about which annotations are validated. Note that the Ingress Controller will reject resources with invalid annotations, which means clients will see `404` responses from NGINX. Before upgrading, ensure the Ingress resources don't have annotations with invalid values. Otherwise, after the upgrade, the Ingress Controller will reject such resources. * [1457](https://github.com/nginxinc/kubernetes-ingress/pull/1457) fixed the bug when an Ingress Controller pod could become ready before it generated the configuration for all relevant resources in the cluster. The fix also requires that the Ingress Controller can successfully list the relevant resources from the Kubernetes API. For example, if the `-enable-custom-resources` cli argument is `true` (which is the default), the VirtualServer, VirtualServerRoute, TransportServer, and Policy CRDs must be created in the cluster, so that the Ingress Controller can list them. This is similar to other custom resources -- see the list [here](https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-manifests/#create-custom-resources). Thus, before upgrading, make sure that the CRDs are created in the cluster. Otherwise, the Ingress Controller pods will not become ready. @@ -622,7 +622,7 @@ BUGFIXES: HELM CHART: * The version of the helm chart is now 0.6.0. * Add new parameters to the Chart: `controller.appprotect.enable`, `controller.globalConfiguration.create`, `controller.globalConfiguration.spec`, `controller.readyStatus.enable`, `controller.readyStatus.port`, `controller.config.annotations`, `controller.reportIngressStatus.annotations`. 
Added in [1035](https://github.com/nginxinc/kubernetes-ingress/pull/1035), [1034](https://github.com/nginxinc/kubernetes-ingress/pull/1034), [1029](https://github.com/nginxinc/kubernetes-ingress/pull/1029), [1003](https://github.com/nginxinc/kubernetes-ingress/pull/1003) thanks to [RubyLangdon](https://github.com/RubyLangdon). -* [1047](https://github.com/nginxinc/kubernetes-ingress/pull/1047) and [1009](https://github.com/nginxinc/kubernetes-ingress/pull/1009): Change how Helm manages the custom resource defintions (CRDs) to support installing multiple Ingress Controller releases. **Note**: If you're using the custom resources (`controller.enableCustomResources` is set to `true`), this is a breaking change. See the HELM UPGRADE section below for the upgrade instructions. +* [1047](https://github.com/nginxinc/kubernetes-ingress/pull/1047) and [1009](https://github.com/nginxinc/kubernetes-ingress/pull/1009): Change how Helm manages the custom resource definitions (CRDs) to support installing multiple Ingress Controller releases. **Note**: If you're using the custom resources (`controller.enableCustomResources` is set to `true`), this is a breaking change. See the HELM UPGRADE section below for the upgrade instructions. CHANGES: * Update NGINX version to 1.19.1. diff --git a/internal/configs/ingress.go b/internal/configs/ingress.go index 4d832f2d4b..fcf402c653 100644 --- a/internal/configs/ingress.go +++ b/internal/configs/ingress.go @@ -19,7 +19,7 @@ import ( const emptyHost = "" -// AppProtectResources holds namespace names of App Protect resources relavant to an Ingress +// AppProtectResources holds namespace names of App Protect resources relevant to an Ingress type AppProtectResources struct { AppProtectPolicy string AppProtectLogconfs []string diff --git a/internal/configs/oidc/oidc.conf b/internal/configs/oidc/oidc.conf index 728933422e..b6e963fc55 100644 --- a/internal/configs/oidc/oidc.conf +++ b/internal/configs/oidc/oidc.conf @@ -8,7 +8,7 @@ location = /_jwks_uri { internal; - proxy_cache jwk; # Cache the JWK Set recieved from IdP + proxy_cache jwk; # Cache the JWK Set received from IdP proxy_cache_valid 200 12h; # How long to consider keys "fresh" proxy_cache_use_stale error timeout updating; # Use old JWK Set if cannot reach IdP proxy_ssl_server_name on; # For SNI to the IdP diff --git a/internal/k8s/configuration_test.go b/internal/k8s/configuration_test.go index 019002ec88..3beaf172ed 100644 --- a/internal/k8s/configuration_test.go +++ b/internal/k8s/configuration_test.go @@ -2393,7 +2393,7 @@ func TestAddGlobalConfiguration(t *testing.T) { // Swap listeners - // We need to hanlde this case in Controller propoperly - update config for all TransportServers and reload once + // We need to handle this case in Controller properly - update config for all TransportServers and reload once // to avoid any race conditions // and errors like nginx: [emerg] duplicate "0.0.0.0:8888" address and port pair in /etc/nginx/nginx.conf:73 diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 4a9b7bbb32..d64686d209 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -1862,13 +1862,13 @@ func TestGetWAFPoliciesForAppProtectPolicy(t *testing.T) { pols: policies, key: "ns1/apPol", want: []*conf_v1.Policy{apPol}, - msg: "WAF pols that ref apPol which has a namepace", + msg: "WAF pols that ref apPol which has a namespace", }, { pols: policies, key: "default/apPol",
want: []*conf_v1.Policy{apPolNoNs}, - msg: "WAF pols that ref apPol which has no namepace", + msg: "WAF pols that ref apPol which has no namespace", }, { pols: policies, @@ -1948,13 +1948,13 @@ func TestGetWAFPoliciesForAppProtectLogConf(t *testing.T) { pols: policies, key: "ns1/logConf", want: []*conf_v1.Policy{logConf}, - msg: "WAF pols that ref logConf which has a namepace", + msg: "WAF pols that ref logConf which has a namespace", }, { pols: policies, key: "default/logConf", want: []*conf_v1.Policy{logConfNoNs}, - msg: "WAF pols that ref logConf which has no namepace", + msg: "WAF pols that ref logConf which has no namespace", }, { pols: policies, diff --git a/pkg/apis/configuration/validation/globalconfiguration_test.go b/pkg/apis/configuration/validation/globalconfiguration_test.go index f757111e69..6354fd5a88 100644 --- a/pkg/apis/configuration/validation/globalconfiguration_test.go +++ b/pkg/apis/configuration/validation/globalconfiguration_test.go @@ -75,7 +75,7 @@ func TestValidateListeners(t *testing.T) { allErrs := gcv.validateListeners(listeners, field.NewPath("listeners")) if len(allErrs) > 0 { - t.Errorf("validateListeners() returned errors %v for valid intput", allErrs) + t.Errorf("validateListeners() returned errors %v for valid input", allErrs) } } @@ -137,7 +137,7 @@ func TestValidateListener(t *testing.T) { allErrs := gcv.validateListener(listener, field.NewPath("listener")) if len(allErrs) > 0 { - t.Errorf("validateListener() returned errors %v for valid intput", allErrs) + t.Errorf("validateListener() returned errors %v for valid input", allErrs) } } diff --git a/tests/suite/resources_utils.py b/tests/suite/resources_utils.py index 6bae1a0f9c..67fd2fcf01 100644 --- a/tests/suite/resources_utils.py +++ b/tests/suite/resources_utils.py @@ -228,7 +228,8 @@ def scale_deployment(v1: CoreV1Api, apps_v1_api: AppsV1Api, name, namespace, val elif value is 0: replica_num = (apps_v1_api.read_namespaced_deployment_scale(name, namespace)).spec.replicas while(replica_num is not None): - replica_num = (apps_v1_api.read_namespaced_deployment_scale(name, namespace)).spec.replicas + replica_num = (apps_v1_api.read_namespaced_deployment_scale( + name, namespace)).spec.replicas time.sleep(1) print("Number of replicas is not 0, retrying...") @@ -941,7 +942,7 @@ def wait_for_event_increment(kube_apis, namespace, event_count, offset) -> bool: """ Wait for event count to increase. - :param kube_apis: Kubernates API + :param kube_apis: Kubernetes API :param namespace: event namespace :param event_count: Current even count :param offset: Number of events generated by last operation @@ -1278,7 +1279,7 @@ def ensure_response_from_backend(req_url, host, additional_headers=None, check40 def get_service_endpoint(kube_apis, service_name, namespace) -> str: """ Wait for endpoint resource to spin up. 
- :param kube_apis: Kubernates API object + :param kube_apis: Kubernetes API object :param service_name: Service resource name :param namespace: test namespace :return: endpoint ip diff --git a/tests/suite/test_transport_server_tcp_load_balance.py b/tests/suite/test_transport_server_tcp_load_balance.py index 5f43c8ca74..b705013cb1 100644 --- a/tests/suite/test_transport_server_tcp_load_balance.py +++ b/tests/suite/test_transport_server_tcp_load_balance.py @@ -20,6 +20,7 @@ ) from settings import TEST_DATA + @pytest.mark.ts @pytest.mark.parametrize( "crd_ingress_controller, transport_server_setup", @@ -59,8 +60,9 @@ def test_number_of_replicas( """ The load balancing of TCP should result in 4 servers to match the 4 replicas of a service. """ - original = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "tcp-service", transport_server_setup.namespace, 4) - + original = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, + "tcp-service", transport_server_setup.namespace, 4) + num_servers = 0 retry = 0 @@ -81,7 +83,8 @@ def test_number_of_replicas( assert num_servers is 4 - scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "tcp-service", transport_server_setup.namespace, original) + scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "tcp-service", + transport_server_setup.namespace, original) retry = 0 while(num_servers is not original and retry <= 50): result_conf = get_ts_nginx_template_conf( @@ -91,13 +94,13 @@ def test_number_of_replicas( transport_server_setup.ingress_pod_name, ingress_controller_prerequisites.namespace ) - + pattern = 'server .*;' num_servers = len(re.findall(pattern, result_conf)) retry += 1 wait_before_test(1) print(f"Retry #{retry}") - + assert num_servers is original def test_tcp_request_load_balanced( @@ -170,7 +173,7 @@ def test_tcp_request_load_balanced_multiple( client.close() assert endpoint is not "" - # Step 2, add a second TransportServer with the same port and confirm te collision + # Step 2, add a second TransportServer with the same port and confirm the collision transport_server_file = f"{TEST_DATA}/transport-server-tcp-load-balance/second-transport-server.yaml" ts_resource = create_ts_from_yaml( kube_apis.custom_objects, transport_server_file, transport_server_setup.namespace @@ -184,14 +187,15 @@ def test_tcp_request_load_balanced_multiple( second_ts_name, ) assert ( - response["status"] - and response["status"]["reason"] == "Rejected" - and response["status"]["state"] == "Warning" - and response["status"]["message"] == "Listener tcp-server is taken by another resource" + response["status"] + and response["status"]["reason"] == "Rejected" + and response["status"]["state"] == "Warning" + and response["status"]["message"] == "Listener tcp-server is taken by another resource" ) # Step 3, remove the default TransportServer with the same port - delete_ts(kube_apis.custom_objects, transport_server_setup.resource, transport_server_setup.namespace) + delete_ts(kube_apis.custom_objects, transport_server_setup.resource, + transport_server_setup.namespace) wait_before_test() response = read_ts( @@ -200,9 +204,9 @@ def test_tcp_request_load_balanced_multiple( second_ts_name, ) assert ( - response["status"] - and response["status"]["reason"] == "AddedOrUpdated" - and response["status"]["state"] == "Valid" + response["status"] + and response["status"]["reason"] == "AddedOrUpdated" + and response["status"]["state"] == "Valid" ) # Step 4, confirm load balancing is still working. 
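The reindented test code above relies on a poll-and-count pattern: it repeatedly fetches the generated NGINX stream configuration and counts `server` directives until the count matches the expected number of replicas. Below is a minimal sketch of that pattern; the helper name `wait_for_server_count` and the `get_conf` callable are hypothetical stand-ins for the `get_ts_nginx_template_conf(...)` call used in the tests.

```python
import re
import time


def wait_for_server_count(get_conf, expected, retries=50, delay=1):
    """Poll the rendered NGINX config until the upstream block contains the
    expected number of server directives, or the retries run out.

    get_conf -- zero-argument callable returning the rendered config text
                (a stand-in for get_ts_nginx_template_conf(...)).
    """
    num_servers = 0
    for attempt in range(1, retries + 1):
        num_servers = len(re.findall(r"server .*;", get_conf()))
        if num_servers == expected:
            break
        time.sleep(delay)
        print(f"Retry #{attempt}")
    return num_servers
```

Comparing counts with `==` rather than `is` (as in `assert num_servers is 4` above) avoids relying on CPython's small-integer caching.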
@@ -330,7 +334,6 @@ def test_tcp_request_max_connections( wait_before_test(1) print(f"Retry #{retry}") - assert configs is 3 # step 2 - make the number of allowed connections @@ -432,7 +435,7 @@ def test_tcp_request_load_balanced_method( wait_before_test(1) print(f"Retry #{retry}") - assert len(endpoints) is 1 + assert len(endpoints) is 1 # Step 3 - restore to default load balancing method and confirm requests are balanced. @@ -571,7 +574,7 @@ def test_tcp_failing_healthcheck_with_match( client.sendall(b'connect') try: - client.recv(4096) # must return ConnectionResetError + client.recv(4096) # must return ConnectionResetError client.close() pytest.fail("We expected an error here, but didn't get it. Exiting...") except ConnectionResetError as ex: @@ -580,4 +583,4 @@ def test_tcp_failing_healthcheck_with_match( # Step 3 - restore - self.restore_ts(kube_apis, transport_server_setup) \ No newline at end of file + self.restore_ts(kube_apis, transport_server_setup) diff --git a/tests/suite/test_transport_server_udp_load_balance.py b/tests/suite/test_transport_server_udp_load_balance.py index 0b5667f483..35b0a8ee17 100644 --- a/tests/suite/test_transport_server_udp_load_balance.py +++ b/tests/suite/test_transport_server_udp_load_balance.py @@ -17,6 +17,7 @@ ) from settings import TEST_DATA + @pytest.mark.ts @pytest.mark.parametrize( "crd_ingress_controller, transport_server_setup", @@ -55,7 +56,8 @@ def test_number_of_replicas( """ The load balancing of UDP should result in 4 servers to match the 4 replicas of a service. """ - original = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "udp-service", transport_server_setup.namespace, 4) + original = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, + "udp-service", transport_server_setup.namespace, 4) num_servers = 0 retry = 0 @@ -67,16 +69,17 @@ def test_number_of_replicas( transport_server_setup.ingress_pod_name, ingress_controller_prerequisites.namespace ) - + pattern = 'server .*;' num_servers = len(re.findall(pattern, result_conf)) retry += 1 wait_before_test(1) print(f"Retry #{retry}") - + assert num_servers is 4 - scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "udp-service", transport_server_setup.namespace, original) + scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "udp-service", + transport_server_setup.namespace, original) retry = 0 while(num_servers is not original and retry <= 50): result_conf = get_ts_nginx_template_conf( @@ -86,13 +89,13 @@ def test_number_of_replicas( transport_server_setup.ingress_pod_name, ingress_controller_prerequisites.namespace ) - + pattern = 'server .*;' num_servers = len(re.findall(pattern, result_conf)) retry += 1 wait_before_test(1) print(f"Retry #{retry}") - + assert num_servers is original def test_udp_request_load_balanced( @@ -162,7 +165,7 @@ def test_udp_request_load_balanced_multiple( print(f'response: {endpoint}') client.close() - # Step 2, add a second TransportServer with the same port and confirm te collision + # Step 2, add a second TransportServer with the same port and confirm the collision transport_server_file = f"{TEST_DATA}/transport-server-udp-load-balance/second-transport-server.yaml" ts_resource = create_ts_from_yaml( kube_apis.custom_objects, transport_server_file, transport_server_setup.namespace @@ -176,14 +179,15 @@ def test_udp_request_load_balanced_multiple( second_ts_name, ) assert ( - response["status"] - and response["status"]["reason"] == "Rejected" - and response["status"]["state"] == "Warning" - and response["status"]["message"] == "Listener 
udp-server is taken by another resource" + response["status"] + and response["status"]["reason"] == "Rejected" + and response["status"]["state"] == "Warning" + and response["status"]["message"] == "Listener udp-server is taken by another resource" ) # Step 3, remove the default TransportServer with the same port - delete_ts(kube_apis.custom_objects, transport_server_setup.resource, transport_server_setup.namespace) + delete_ts(kube_apis.custom_objects, transport_server_setup.resource, + transport_server_setup.namespace) wait_before_test() response = read_ts( @@ -192,9 +196,9 @@ def test_udp_request_load_balanced_multiple( second_ts_name, ) assert ( - response["status"] - and response["status"]["reason"] == "AddedOrUpdated" - and response["status"]["state"] == "Valid" + response["status"] + and response["status"]["reason"] == "AddedOrUpdated" + and response["status"]["state"] == "Valid" ) # Step 4, confirm load balancing is still working. @@ -294,7 +298,7 @@ def test_udp_passing_healthcheck_with_match( retry = 0 endpoints = {} - while(len(endpoints) is not 3 and retry <=30): + while(len(endpoints) is not 3 and retry <= 30): for i in range(20): client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) client.sendto("ping".encode('utf-8'), (host, port))
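The UDP tests above follow a similar probing pattern: send a short datagram, record which backend answered, and expect the set of distinct responders to reach the number of healthy pods. A rough, self-contained sketch is below; the helper `collect_udp_endpoints`, the 4096-byte read, and the assumption that each backend echoes an identifier are illustrative guesses based on the snippets above, not part of the test suite.

```python
import socket


def collect_udp_endpoints(host, port, attempts=20, timeout=2.0):
    """Send short UDP probes and record which backend answered each one.

    With load balancing across healthy pods, the returned set should grow
    to one entry per healthy endpoint.
    """
    endpoints = set()
    for _ in range(attempts):
        client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        client.settimeout(timeout)
        try:
            client.sendto("ping".encode("utf-8"), (host, port))
            data, _ = client.recvfrom(4096)
            endpoints.add(data.decode("utf-8").strip())
        finally:
            client.close()
    return endpoints
```

A test such as `test_udp_passing_healthcheck_with_match` could then assert `len(collect_udp_endpoints(host, port)) == 3` rather than rebuilding the loop inline.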