diff --git a/cmd/nginx-ingress/flags.go b/cmd/nginx-ingress/flags.go
index 0384f0625b..177f3c3877 100644
--- a/cmd/nginx-ingress/flags.go
+++ b/cmd/nginx-ingress/flags.go
@@ -139,6 +139,15 @@ var (
prometheusMetricsListenPort = flag.Int("prometheus-metrics-listen-port", 9113,
"Set the port where the Prometheus metrics are exposed. [1024 - 65535]")
+ enableServiceInsight = flag.Bool("enable-service-insight", false,
+ `Enable service insight for external load balancers. Requires -nginx-plus`)
+
+ serviceInsightTLSSecretName = flag.String("service-insight-tls-secret", "",
`A Secret with a TLS certificate and key for TLS termination of the Service Insight endpoint.`)
+
+ serviceInsightListenPort = flag.Int("service-insight-listen-port", 9114,
+ "Set the port where the Service Insight stats are exposed. Requires -nginx-plus. [1024 - 65535]")
+
enableCustomResources = flag.Bool("enable-custom-resources", true,
"Enable custom resources")
@@ -250,6 +259,11 @@ func parseFlags() {
*enableLatencyMetrics = false
}
+ if *enableServiceInsight && !*nginxPlus {
+ glog.Warning("enable-service-insight flag support is for NGINX Plus, service insight endpoint will not be exposed")
+ *enableServiceInsight = false
+ }
+
if *enableCertManager && !*enableCustomResources {
glog.Fatal("enable-cert-manager flag requires -enable-custom-resources")
}
@@ -352,6 +366,11 @@ func validationChecks() {
glog.Fatalf("Invalid value for ready-status-port: %v", readyStatusPortValidationError)
}
+ healthProbePortValidationError := validatePort(*serviceInsightListenPort)
+ if healthProbePortValidationError != nil {
+ glog.Fatalf("Invalid value for service-insight-listen-port: %v", metricsPortValidationError)
+ }
+
var err error
allowedCIDRs, err = parseNginxStatusAllowCIDRs(*nginxStatusAllowCIDRs)
if err != nil {
diff --git a/cmd/nginx-ingress/main.go b/cmd/nginx-ingress/main.go
index 3fbf2a4f4c..f05449fb99 100644
--- a/cmd/nginx-ingress/main.go
+++ b/cmd/nginx-ingress/main.go
@@ -16,6 +16,7 @@ import (
"github.com/nginxinc/kubernetes-ingress/internal/configs"
"github.com/nginxinc/kubernetes-ingress/internal/configs/version1"
"github.com/nginxinc/kubernetes-ingress/internal/configs/version2"
+ "github.com/nginxinc/kubernetes-ingress/internal/healthcheck"
"github.com/nginxinc/kubernetes-ingress/internal/k8s"
"github.com/nginxinc/kubernetes-ingress/internal/k8s/secrets"
"github.com/nginxinc/kubernetes-ingress/internal/metrics"
@@ -120,6 +121,10 @@ func main() {
transportServerValidator := cr_validation.NewTransportServerValidator(*enableTLSPassthrough, *enableSnippets, *nginxPlus)
virtualServerValidator := cr_validation.NewVirtualServerValidator(cr_validation.IsPlus(*nginxPlus), cr_validation.IsDosEnabled(*appProtectDos), cr_validation.IsCertManagerEnabled(*enableCertManager), cr_validation.IsExternalDNSEnabled(*enableExternalDNS))
+ if *enableServiceInsight {
+ createHealthProbeEndpoint(kubeClient, plusClient, cnf)
+ }
+
lbcInput := k8s.NewLoadBalancerControllerInput{
KubeClient: kubeClient,
ConfClient: confClient,
@@ -446,6 +451,10 @@ func createGlobalConfigurationValidator() *cr_validation.GlobalConfigurationVali
forbiddenListenerPorts[*prometheusMetricsListenPort] = true
}
+ if *enableServiceInsight {
+ forbiddenListenerPorts[*serviceInsightListenPort] = true
+ }
+
return cr_validation.NewGlobalConfigurationValidator(forbiddenListenerPorts)
}
@@ -674,6 +683,22 @@ func createPlusAndLatencyCollectors(
return plusCollector, syslogListener, lc
}
+func createHealthProbeEndpoint(kubeClient *kubernetes.Clientset, plusClient *client.NginxClient, cnf *configs.Configurator) {
+ if !*enableServiceInsight {
+ return
+ }
+ var serviceInsightSecret *api_v1.Secret
+ var err error
+
+ if *serviceInsightTLSSecretName != "" {
+ serviceInsightSecret, err = getAndValidateSecret(kubeClient, *serviceInsightTLSSecretName)
+ if err != nil {
+ glog.Fatalf("Error trying to get the service insight TLS secret %v: %v", *serviceInsightTLSSecretName, err)
+ }
+ }
+ go healthcheck.RunHealthCheck(*serviceInsightListenPort, plusClient, cnf, serviceInsightSecret)
+}
+
func processGlobalConfiguration() {
if *globalConfiguration != "" {
_, _, err := k8s.ParseNamespaceName(*globalConfiguration)
diff --git a/deployments/deployment/nginx-plus-ingress.yaml b/deployments/deployment/nginx-plus-ingress.yaml
index a73d7d9fde..0263f8fb59 100644
--- a/deployments/deployment/nginx-plus-ingress.yaml
+++ b/deployments/deployment/nginx-plus-ingress.yaml
@@ -32,6 +32,8 @@ spec:
containerPort: 8081
- name: prometheus
containerPort: 9113
+ - name: service-insight
+ containerPort: 9114
readinessProbe:
httpGet:
path: /nginx-ready
@@ -75,4 +77,5 @@ spec:
#- -report-ingress-status
#- -external-service=nginx-ingress
#- -enable-prometheus-metrics
+ #- -enable-service-insight
#- -global-configuration=$(POD_NAMESPACE)/nginx-configuration
diff --git a/deployments/helm-chart/README.md b/deployments/helm-chart/README.md
index 1c910670e7..e1bb7ee7e2 100644
--- a/deployments/helm-chart/README.md
+++ b/deployments/helm-chart/README.md
@@ -262,6 +262,10 @@ Parameter | Description | Default
`prometheus.port` | Configures the port to scrape the metrics. | 9113
`prometheus.scheme` | Configures the HTTP scheme to use for connections to the Prometheus endpoint. | http
`prometheus.secret` | The namespace / name of a Kubernetes TLS Secret. If specified, this secret is used to secure the Prometheus endpoint with TLS connections. | ""
+`serviceInsight.create` | Expose NGINX Plus Service Insight endpoint. | false
+`serviceInsight.port` | Configures the port to expose the Service Insight endpoint. | 9114
+`serviceInsight.scheme` | Configures the HTTP scheme to use for connections to the Service Insight endpoint. | http
+`serviceInsight.secret` | The namespace / name of a Kubernetes TLS Secret. If specified, this secret is used to secure the Service Insight endpoint with TLS connections. | ""
`nginxServiceMesh.enable` | Enable integration with NGINX Service Mesh. See the NGINX Service Mesh [docs](https://docs.nginx.com/nginx-service-mesh/tutorials/kic/deploy-with-kic/) for more details. Requires `controller.nginxplus`. | false
`nginxServiceMesh.enableEgress` | Enable NGINX Service Mesh workloads to route egress traffic through the Ingress Controller. See the NGINX Service Mesh [docs](https://docs.nginx.com/nginx-service-mesh/tutorials/kic/deploy-with-kic/#enabling-egress) for more details. Requires `nginxServiceMesh.enable`. | false
diff --git a/deployments/helm-chart/templates/controller-daemonset.yaml b/deployments/helm-chart/templates/controller-daemonset.yaml
index 1c2c74cbc8..30a864d878 100644
--- a/deployments/helm-chart/templates/controller-daemonset.yaml
+++ b/deployments/helm-chart/templates/controller-daemonset.yaml
@@ -94,6 +94,10 @@ spec:
- name: prometheus
containerPort: {{ .Values.prometheus.port }}
{{- end }}
+{{- if .Values.serviceInsight.create }}
+ - name: service-insight
+ containerPort: {{ .Values.serviceInsight.port }}
+{{- end }}
{{- if .Values.controller.readyStatus.enable }}
- name: readiness-port
containerPort: {{ .Values.controller.readyStatus.port }}
@@ -199,6 +203,9 @@ spec:
- -enable-prometheus-metrics={{ .Values.prometheus.create }}
- -prometheus-metrics-listen-port={{ .Values.prometheus.port }}
- -prometheus-tls-secret={{ .Values.prometheus.secret }}
+ - -enable-service-insight={{ .Values.serviceInsight.create }}
+ - -service-insight-listen-port={{ .Values.serviceInsight.port }}
+ - -service-insight-tls-secret={{ .Values.serviceInsight.secret }}
- -enable-custom-resources={{ .Values.controller.enableCustomResources }}
- -enable-snippets={{ .Values.controller.enableSnippets }}
- -include-year={{ .Values.controller.includeYear }}
diff --git a/deployments/helm-chart/templates/controller-deployment.yaml b/deployments/helm-chart/templates/controller-deployment.yaml
index c4bd207515..282e25583d 100644
--- a/deployments/helm-chart/templates/controller-deployment.yaml
+++ b/deployments/helm-chart/templates/controller-deployment.yaml
@@ -97,6 +97,10 @@ spec:
- name: prometheus
containerPort: {{ .Values.prometheus.port }}
{{- end }}
+{{- if .Values.serviceInsight.create }}
+ - name: service-insight
+ containerPort: {{ .Values.serviceInsight.port }}
+{{- end }}
{{- if .Values.controller.readyStatus.enable }}
- name: readiness-port
containerPort: {{ .Values.controller.readyStatus.port }}
@@ -202,6 +206,9 @@ spec:
- -enable-prometheus-metrics={{ .Values.prometheus.create }}
- -prometheus-metrics-listen-port={{ .Values.prometheus.port }}
- -prometheus-tls-secret={{ .Values.prometheus.secret }}
+ - -enable-service-insight={{ .Values.serviceInsight.create }}
+ - -service-insight-listen-port={{ .Values.serviceInsight.port }}
+ - -service-insight-tls-secret={{ .Values.serviceInsight.secret }}
- -enable-custom-resources={{ .Values.controller.enableCustomResources }}
- -enable-snippets={{ .Values.controller.enableSnippets }}
- -include-year={{ .Values.controller.includeYear }}
diff --git a/deployments/helm-chart/values.schema.json b/deployments/helm-chart/values.schema.json
index 9b1a4a0d8e..b37baec07b 100644
--- a/deployments/helm-chart/values.schema.json
+++ b/deployments/helm-chart/values.schema.json
@@ -7,6 +7,7 @@
"controller",
"rbac",
"prometheus",
+ "serviceInsight",
"nginxServiceMesh"
],
"properties": {
@@ -1436,6 +1437,56 @@
}
]
},
+ "serviceInsight": {
+ "type": "object",
+ "default": {},
+ "title": "The Service Insight Schema",
+ "required": [
+ "create"
+ ],
+ "properties": {
+ "create": {
+ "type": "boolean",
+ "default": false,
+ "title": "The create",
+ "examples": [
+ true
+ ]
+ },
+ "port": {
+ "type": "integer",
+ "default": 9114,
+ "title": "The port",
+ "examples": [
+ 9114
+ ]
+ },
+ "secret": {
+ "type": "string",
+ "default": "",
+ "title": "The secret",
+ "examples": [
+ ""
+ ]
+ },
+ "scheme": {
+ "type": "string",
+ "default": "http",
+ "title": "The scheme",
+ "examples": [
+ "http"
+ ]
+ }
+ },
+ "examples": [
+ {
+ "create": true,
+ "port": 9114,
+ "secret": "",
+ "scheme": "http"
+ }
+ ]
+ },
"nginxServiceMesh": {
"type": "object",
"default": {},
@@ -1622,6 +1673,12 @@
"secret": "",
"scheme": "http"
},
+ "serviceInsight": {
+ "create": true,
+ "port": 9114,
+ "secret": "",
+ "scheme": "http"
+ },
"nginxServiceMesh": {
"enable": false,
"enableEgress": false
diff --git a/deployments/helm-chart/values.yaml b/deployments/helm-chart/values.yaml
index d2cec03cdc..318f27634e 100644
--- a/deployments/helm-chart/values.yaml
+++ b/deployments/helm-chart/values.yaml
@@ -433,6 +433,19 @@ prometheus:
## Configures the HTTP scheme used.
scheme: http
+serviceInsight:
+ ## Expose NGINX Plus Service Insight endpoint.
+ create: false
+
+ ## Configures the port to expose the Service Insight endpoint.
+ port: 9114
+
+ ## Specifies the namespace/name of a Kubernetes TLS Secret which will be used to protect the Service Insight endpoint.
+ secret: ""
+
+ ## Configures the HTTP scheme used.
+ scheme: http
+
nginxServiceMesh:
## Enables integration with NGINX Service Mesh.
## Requires controller.nginxplus
diff --git a/docs/content/configuration/global-configuration/command-line-arguments.md b/docs/content/configuration/global-configuration/command-line-arguments.md
index ed8de9f3da..a31e1cd6d5 100644
--- a/docs/content/configuration/global-configuration/command-line-arguments.md
+++ b/docs/content/configuration/global-configuration/command-line-arguments.md
@@ -336,7 +336,30 @@ Format: `[1024 - 65535]` (default `9113`)
A Secret with a TLS certificate and key for TLS termination of the Prometheus metrics endpoint.
-* If the argument is not set, the prometheus endpoint will not use a TLS connection.
+* If the argument is not set, the Prometheus endpoint will not use a TLS connection.
+* If the argument is set, but the Ingress Controller is not able to fetch the Secret from Kubernetes API, the Ingress Controller will fail to start.
+
+
+
+### -enable-service-insight
+
+Exposes the Service Insight endpoint for the Ingress Controller. Requires NGINX Plus.
+
+
+
+### -service-insight-listen-port <int>
+
+Sets the port where the Service Insight endpoint is exposed.
+
+Format: `[1024 - 65535]` (default `9114`)
+
+
+
+### -service-insight-tls-secret <string>
+
+A Secret with a TLS certificate and key for TLS termination of the Service Insight endpoint.
+
+* If the argument is not set, the Service Insight endpoint will not use a TLS connection.
* If the argument is set, but the Ingress Controller is not able to fetch the Secret from Kubernetes API, the Ingress Controller will fail to start.
Format: `<namespace>/<name>`
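+
+For example, a matching Secret could be created with `kubectl` (a sketch; the Secret name `service-insight-tls` and the certificate file names are placeholders):
+
+```shell
+kubectl create secret tls service-insight-tls -n nginx-ingress --cert=cert.pem --key=key.pem
+```
+
+The Ingress Controller would then be started with `-service-insight-tls-secret=nginx-ingress/service-insight-tls`.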
diff --git a/docs/content/installation/installation-with-helm.md b/docs/content/installation/installation-with-helm.md
index 4854c1b7c2..1215045427 100644
--- a/docs/content/installation/installation-with-helm.md
+++ b/docs/content/installation/installation-with-helm.md
@@ -250,6 +250,10 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont
|``prometheus.port`` | Configures the port to scrape the metrics. | 9113 |
|``prometheus.scheme`` | Configures the HTTP scheme that requests must use to connect to the Prometheus endpoint. | http |
|``prometheus.secret`` | Specifies the namespace/name of a Kubernetes TLS secret which can be used to establish a secure HTTPS connection with the Prometheus endpoint. | "" |
+|``serviceInsight.create`` | Expose NGINX Plus Service Insight endpoint. | false |
|``serviceInsight.port`` | Configures the port to expose the Service Insight endpoint. | 9114 |
+|``serviceInsight.scheme`` | Configures the HTTP scheme to use for connections to the Service Insight endpoint. | http |
+|``serviceInsight.secret`` | The namespace / name of a Kubernetes TLS Secret. If specified, this secret is used to secure the Service Insight endpoint with TLS connections. | "" |
{{% /table %}}
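+
+For example, the Service Insight endpoint can be enabled at install or upgrade time with `--set` (a sketch; the release name and chart reference are placeholders):
+
+```console
+helm upgrade --install my-release nginx-stable/nginx-ingress --set controller.nginxplus=true --set serviceInsight.create=true
+```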
## Notes
diff --git a/docs/content/logging-and-monitoring/service-insight.md b/docs/content/logging-and-monitoring/service-insight.md
new file mode 100644
index 0000000000..f55ee5a5f8
--- /dev/null
+++ b/docs/content/logging-and-monitoring/service-insight.md
@@ -0,0 +1,58 @@
+---
+title: Service Insight
+
+description: "The Ingress Controller exposes the Service Insight endpoint."
+weight: 2100
+doctypes: [""]
+aliases:
+ - /service-insight/
+toc: true
+docs: "DOCS-000"
+---
+
+
+The Ingress Controller exposes a Service Insight endpoint that provides per-host health statistics for Virtual Servers (VS).
+The endpoint returns data in JSON format, and the HTTP status code of the response indicates the health of the service.
+The response body holds the total, up, and unhealthy numbers of upstream peers associated with the requested hostname.
+
+The service is considered healthy if at least one upstream peer is up; in this case, the endpoint returns HTTP 200 OK.
+If all upstream peers are unhealthy, the service is not healthy and the endpoint returns a status code other than 200 OK.
+
+
+
+## Enabling Service Insight Endpoint
+
+If you're using *Kubernetes manifests* (Deployment or DaemonSet) to install the Ingress Controller, to enable the Service Insight endpoint:
+1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This will expose the Service Insight endpoint via the path `/probe/{hostname}` on port `9114` (customizable with the `-service-insight-listen-port` command-line argument).
+1. To enable TLS for the Service Insight endpoint, configure the `-service-insight-tls-secret` command-line argument with the namespace and name of a TLS Secret.
+1. Add the Service Insight port to the list of the ports of the Ingress Controller container in the template of the Ingress Controller pod:
+ ```yaml
+ - name: service-insight
+ containerPort: 9114
+ ```
+
+If you're using *Helm* to install the Ingress Controller, to enable the Service Insight endpoint, configure the `serviceInsight.*` parameters of the Helm chart. See the [Installation with Helm](/nginx-ingress-controller/installation/installation-with-helm) doc.
+
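+Once enabled, the endpoint can be queried directly. A minimal sketch, assuming the Service Insight port is reachable at `<address>` and a VirtualServer with the host `cafe.example.com` is deployed:
+
+```shell
+curl http://<address>:9114/probe/cafe.example.com
+```
+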
+## Available Statistics and HTTP Response Codes
+
+The Service Insight endpoint provides the following statistics for the requested hostname:
+
+* Total number of upstream peers
+* Number of peers in the 'Up' state
+* Number of unhealthy peers (any state other than 'Up')
+
+These statistics are returned as JSON:
+
+```json
+{ "Total": , "Up": , "Unhealthy": }
+```
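+
+For example, a host whose upstreams have three peers that all pass health checks returns:
+
+```json
+{ "Total": 3, "Up": 3, "Unhealthy": 0 }
+```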
+
+Response codes:
+
+* HTTP 200 OK - Service is healthy
+* HTTP 404 Not Found - No VS/upstreams found for the requested hostname
+* HTTP 503 Service Unavailable - The service is down (all upstream peers are unhealthy)
+
+**Note**: wildcards in hostnames are not supported at the moment.
diff --git a/go.mod b/go.mod
index d63fda0ee3..25546a7325 100644
--- a/go.mod
+++ b/go.mod
@@ -6,6 +6,7 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.18.3
github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.13.24
github.com/cert-manager/cert-manager v1.10.1
+ github.com/go-chi/chi v1.5.4
github.com/golang-jwt/jwt/v4 v4.4.2
github.com/golang/glog v1.0.0
github.com/google/go-cmp v0.5.9
diff --git a/go.sum b/go.sum
index ce7c2d1fb8..d0cdd3cad6 100644
--- a/go.sum
+++ b/go.sum
@@ -112,6 +112,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs=
+github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
diff --git a/internal/configs/configurator.go b/internal/configs/configurator.go
index 28099cf6d6..ff54420919 100644
--- a/internal/configs/configurator.go
+++ b/internal/configs/configurator.go
@@ -265,6 +265,43 @@ func (cnf *Configurator) AddOrUpdateIngress(ingEx *IngressEx) (Warnings, error)
return warnings, nil
}
+// GetVirtualServerForHost takes a hostname and returns a VS for the given hostname.
+func (cnf *Configurator) GetVirtualServerForHost(hostname string) *conf_v1.VirtualServer {
+ for _, vsEx := range cnf.virtualServers {
+ if vsEx.VirtualServer.Spec.Host == hostname {
+ return vsEx.VirtualServer
+ }
+ }
+ return nil
+}
+
+// GetUpstreamsforVirtualServer takes a VS and returns a slice of its upstream names.
+func (cnf *Configurator) GetUpstreamsforVirtualServer(vs *conf_v1.VirtualServer) []string {
+ glog.V(3).Infof("Get upstreamName for vs: %s", vs.Spec.Host)
+ upstreamNames := make([]string, 0, len(vs.Spec.Upstreams))
+
+ virtualServerUpstreamNamer := NewUpstreamNamerForVirtualServer(vs)
+
+ for _, u := range vs.Spec.Upstreams {
+ upstreamName := virtualServerUpstreamNamer.GetNameForUpstream(u.Name)
+ glog.V(3).Infof("upstream: %s, upstreamName: %s", u.Name, upstreamName)
+ upstreamNames = append(upstreamNames, upstreamName)
+ }
+ return upstreamNames
+}
+
+// GetUpstreamsforHost takes a hostname and returns a slice of upstream
+// names for the given hostname.
+func (cnf *Configurator) GetUpstreamsforHost(hostname string) []string {
+ glog.V(3).Infof("Get upstream for host: %s", hostname)
+ vs := cnf.GetVirtualServerForHost(hostname)
+
+ if vs != nil {
+ return cnf.GetUpstreamsforVirtualServer(vs)
+ }
+ return nil
+}
+
func (cnf *Configurator) addOrUpdateIngress(ingEx *IngressEx) (Warnings, error) {
apResources := cnf.updateApResources(ingEx)
diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go
index 8390f54162..da05e3b787 100644
--- a/internal/configs/virtualserver.go
+++ b/internal/configs/virtualserver.go
@@ -132,17 +132,20 @@ type upstreamNamer struct {
namespace string
}
-func newUpstreamNamerForVirtualServer(virtualServer *conf_v1.VirtualServer) *upstreamNamer {
+// NewUpstreamNamerForVirtualServer creates a new namer.
+//
+//nolint:revive
+func NewUpstreamNamerForVirtualServer(virtualServer *conf_v1.VirtualServer) *upstreamNamer {
return &upstreamNamer{
prefix: fmt.Sprintf("vs_%s_%s", virtualServer.Namespace, virtualServer.Name),
namespace: virtualServer.Namespace,
}
}
-func newUpstreamNamerForVirtualServerRoute(
- virtualServer *conf_v1.VirtualServer,
- virtualServerRoute *conf_v1.VirtualServerRoute,
-) *upstreamNamer {
+// NewUpstreamNamerForVirtualServerRoute creates a new namer.
+//
+//nolint:revive
+func NewUpstreamNamerForVirtualServerRoute(virtualServer *conf_v1.VirtualServer, virtualServerRoute *conf_v1.VirtualServerRoute) *upstreamNamer {
return &upstreamNamer{
prefix: fmt.Sprintf(
"vs_%s_%s_vsr_%s_%s",
@@ -334,7 +337,7 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig(
// necessary for generateLocation to know what Upstream each Location references
crUpstreams := make(map[string]conf_v1.Upstream)
- virtualServerUpstreamNamer := newUpstreamNamerForVirtualServer(vsEx.VirtualServer)
+ virtualServerUpstreamNamer := NewUpstreamNamerForVirtualServer(vsEx.VirtualServer)
var upstreams []version2.Upstream
var statusMatches []version2.StatusMatch
var healthChecks []version2.HealthCheck
@@ -373,7 +376,7 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig(
}
// generate upstreams for each VirtualServerRoute
for _, vsr := range vsEx.VirtualServerRoutes {
- upstreamNamer := newUpstreamNamerForVirtualServerRoute(vsEx.VirtualServer, vsr)
+ upstreamNamer := NewUpstreamNamerForVirtualServerRoute(vsEx.VirtualServer, vsr)
for _, u := range vsr.Spec.Upstreams {
if (sslConfig == nil || !vsc.cfgParams.HTTP2) && isGRPC(u.Type) {
vsc.addWarningf(vsr, "gRPC cannot be configured for upstream %s. gRPC requires enabled HTTP/2 and TLS termination", u.Name)
@@ -523,7 +526,7 @@ func (vsc *virtualServerConfigurator) GenerateVirtualServerConfig(
// generate config for subroutes of each VirtualServerRoute
for _, vsr := range vsEx.VirtualServerRoutes {
isVSR := true
- upstreamNamer := newUpstreamNamerForVirtualServerRoute(vsEx.VirtualServer, vsr)
+ upstreamNamer := NewUpstreamNamerForVirtualServerRoute(vsEx.VirtualServer, vsr)
for _, r := range vsr.Spec.Subroutes {
errorPages := errorPageDetails{
pages: r.ErrorPages,
@@ -2252,7 +2255,7 @@ func createUpstreamsForPlus(
var upstreams []version2.Upstream
isPlus := true
- upstreamNamer := newUpstreamNamerForVirtualServer(virtualServerEx.VirtualServer)
+ upstreamNamer := NewUpstreamNamerForVirtualServer(virtualServerEx.VirtualServer)
vsc := newVirtualServerConfigurator(baseCfgParams, isPlus, false, staticParams, false)
for _, u := range virtualServerEx.VirtualServer.Spec.Upstreams {
@@ -2273,7 +2276,7 @@ func createUpstreamsForPlus(
}
for _, vsr := range virtualServerEx.VirtualServerRoutes {
- upstreamNamer = newUpstreamNamerForVirtualServerRoute(virtualServerEx.VirtualServer, vsr)
+ upstreamNamer = NewUpstreamNamerForVirtualServerRoute(virtualServerEx.VirtualServer, vsr)
for _, u := range vsr.Spec.Upstreams {
isExternalNameSvc := virtualServerEx.ExternalNameSvcs[GenerateExternalNameSvcKey(vsr.Namespace, u.Service)]
if isExternalNameSvc {
diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go
index 413417e3f2..4d193abd75 100644
--- a/internal/configs/virtualserver_test.go
+++ b/internal/configs/virtualserver_test.go
@@ -96,7 +96,7 @@ func TestUpstreamNamerForVirtualServer(t *testing.T) {
Namespace: "default",
},
}
- upstreamNamer := newUpstreamNamerForVirtualServer(&virtualServer)
+ upstreamNamer := NewUpstreamNamerForVirtualServer(&virtualServer)
upstream := "test"
expected := "vs_default_cafe_test"
@@ -121,7 +121,7 @@ func TestUpstreamNamerForVirtualServerRoute(t *testing.T) {
Namespace: "default",
},
}
- upstreamNamer := newUpstreamNamerForVirtualServerRoute(&virtualServer, &virtualServerRoute)
+ upstreamNamer := NewUpstreamNamerForVirtualServerRoute(&virtualServer, &virtualServerRoute)
upstream := "test"
expected := "vs_default_cafe_vsr_default_coffee_test"
@@ -5675,7 +5675,7 @@ func TestGenerateSplits(t *testing.T) {
Namespace: "default",
},
}
- upstreamNamer := newUpstreamNamerForVirtualServer(&virtualServer)
+ upstreamNamer := NewUpstreamNamerForVirtualServer(&virtualServer)
variableNamer := newVariableNamer(&virtualServer)
scIndex := 1
cfgParams := ConfigParams{}
@@ -5886,7 +5886,7 @@ func TestGenerateDefaultSplitsConfig(t *testing.T) {
Namespace: "default",
},
}
- upstreamNamer := newUpstreamNamerForVirtualServer(&virtualServer)
+ upstreamNamer := NewUpstreamNamerForVirtualServer(&virtualServer)
variableNamer := newVariableNamer(&virtualServer)
index := 1
@@ -6072,7 +6072,7 @@ func TestGenerateMatchesConfig(t *testing.T) {
},
},
}
- upstreamNamer := newUpstreamNamerForVirtualServer(&virtualServer)
+ upstreamNamer := NewUpstreamNamerForVirtualServer(&virtualServer)
variableNamer := newVariableNamer(&virtualServer)
index := 1
scIndex := 2
@@ -6454,7 +6454,7 @@ func TestGenerateMatchesConfigWithMultipleSplits(t *testing.T) {
Namespace: "default",
},
}
- upstreamNamer := newUpstreamNamerForVirtualServer(&virtualServer)
+ upstreamNamer := NewUpstreamNamerForVirtualServer(&virtualServer)
variableNamer := newVariableNamer(&virtualServer)
index := 1
scIndex := 2
diff --git a/internal/healthcheck/healthcheck.go b/internal/healthcheck/healthcheck.go
new file mode 100644
index 0000000000..b25fe58bf7
--- /dev/null
+++ b/internal/healthcheck/healthcheck.go
@@ -0,0 +1,179 @@
+// Package healthcheck provides primitives for running a deep healthcheck service.
+package healthcheck
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+
+ "github.com/go-chi/chi"
+ "github.com/golang/glog"
+ "github.com/nginxinc/kubernetes-ingress/internal/configs"
+ "github.com/nginxinc/nginx-plus-go-client/client"
+ "k8s.io/utils/strings/slices"
+)
+
+// RunHealthCheck starts the deep healthcheck service.
+func RunHealthCheck(port int, plusClient *client.NginxClient, cnf *configs.Configurator, healthProbeTLSSecret *v1.Secret) {
+ addr := fmt.Sprintf(":%s", strconv.Itoa(port))
+ hs, err := NewHealthServer(addr, plusClient, cnf, healthProbeTLSSecret)
+ if err != nil {
+ glog.Fatal(err)
+ }
+ glog.Infof("Starting Service Insight listener on: %v%v", addr, "/probe")
+ glog.Fatal(hs.ListenAndServe())
+}
+
+// HealthServer holds data required for running
+// the healthcheck server.
+type HealthServer struct {
+ Server *http.Server
+ URL string
+ UpstreamsForHost func(host string) []string
+ NginxUpstreams func() (*client.Upstreams, error)
+}
+
+// NewHealthServer creates Health Server. If secret is provided,
+// the server is configured with TLS Config.
+func NewHealthServer(addr string, nc *client.NginxClient, cnf *configs.Configurator, secret *v1.Secret) (*HealthServer, error) {
+ hs := HealthServer{
+ Server: &http.Server{
+ Addr: addr,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ },
+ URL: fmt.Sprintf("http://%s/", addr),
+ UpstreamsForHost: cnf.GetUpstreamsforHost,
+ NginxUpstreams: nc.GetUpstreams,
+ }
+
+ if secret != nil {
+ tlsCert, err := makeCert(secret)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create TLS cert: %w", err)
+ }
+ hs.Server.TLSConfig = &tls.Config{
+ Certificates: []tls.Certificate{tlsCert},
+ MinVersion: tls.VersionTLS12,
+ }
+ hs.URL = fmt.Sprintf("https://%s/", addr)
+ }
+ return &hs, nil
+}
+
+// ListenAndServe starts healthcheck server.
+func (hs *HealthServer) ListenAndServe() error {
+ mux := chi.NewRouter()
+ mux.Get("/probe/{hostname}", hs.Retrieve)
+ hs.Server.Handler = mux
+ if hs.Server.TLSConfig != nil {
+ return hs.Server.ListenAndServeTLS("", "")
+ }
+ return hs.Server.ListenAndServe()
+}
+
+// Shutdown shuts down healthcheck server.
+func (hs *HealthServer) Shutdown(ctx context.Context) error {
+ return hs.Server.Shutdown(ctx)
+}
+
+// Retrieve finds health stats for the host identified by a hostname in the request URL.
+func (hs *HealthServer) Retrieve(w http.ResponseWriter, r *http.Request) {
+ hostname := chi.URLParam(r, "hostname")
+ host := sanitize(hostname)
+
+ upstreamNames := hs.UpstreamsForHost(host)
+ if len(upstreamNames) == 0 {
+ glog.Errorf("no upstreams for requested hostname %s or hostname does not exist", host)
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ upstreams, err := hs.NginxUpstreams()
+ if err != nil {
+ glog.Errorf("error retrieving upstreams for requested hostname: %s", host)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ stats := countStats(upstreams, upstreamNames)
+ data, err := json.Marshal(stats)
+ if err != nil {
+ glog.Error("error marshaling result", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ switch stats.Up {
+ case 0:
+ w.WriteHeader(http.StatusServiceUnavailable)
+ default:
+ w.WriteHeader(http.StatusOK)
+ }
+ if _, err = w.Write(data); err != nil {
+ glog.Error("error writing result", err)
+ http.Error(w, "internal error", http.StatusInternalServerError)
+ }
+}
+
+func sanitize(s string) string {
+ hostname := strings.TrimSpace(s)
+ hostname = strings.ReplaceAll(hostname, "\n", "")
+ hostname = strings.ReplaceAll(hostname, "\r", "")
+ return hostname
+}
+
+// makeCert takes k8s Secret and returns tls Certificate for the server.
+// It errors if either cert, or key are not present in the Secret.
+func makeCert(s *v1.Secret) (tls.Certificate, error) {
+ cert, ok := s.Data[v1.TLSCertKey]
+ if !ok {
+ return tls.Certificate{}, errors.New("missing tls cert")
+ }
+ key, ok := s.Data[v1.TLSPrivateKeyKey]
+ if !ok {
+ return tls.Certificate{}, errors.New("missing tls key")
+ }
+ return tls.X509KeyPair(cert, key)
+}
+
+// HostStats holds information about total, up and
+// unhealthy number of 'peers' associated with the
+// given host.
+type HostStats struct {
+ Total int
+ Up int
+ Unhealthy int
+}
+
+// countStats calculates and returns statistics for a host.
+func countStats(upstreams *client.Upstreams, upstreamNames []string) HostStats {
+ total, up := 0, 0
+ for name, u := range *upstreams {
+ if !slices.Contains(upstreamNames, name) {
+ continue
+ }
+ for _, p := range u.Peers {
+ total++
+ if strings.ToLower(p.State) != "up" {
+ continue
+ }
+ up++
+ }
+ }
+
+ unhealthy := total - up
+ return HostStats{
+ Total: total,
+ Up: up,
+ Unhealthy: unhealthy,
+ }
+}
diff --git a/internal/healthcheck/healthcheck_test.go b/internal/healthcheck/healthcheck_test.go
new file mode 100644
index 0000000000..24ab5aa81b
--- /dev/null
+++ b/internal/healthcheck/healthcheck_test.go
@@ -0,0 +1,327 @@
+package healthcheck_test
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "log"
+ "net"
+ "net/http"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/nginxinc/kubernetes-ingress/internal/healthcheck"
+ "github.com/nginxinc/nginx-plus-go-client/client"
+)
+
+// newTestHealthServer is a helper func responsible for creating,
+// starting and shutting down healthcheck server for each test.
+func newTestHealthServer(t *testing.T) *healthcheck.HealthServer {
+ t.Helper()
+
+ l, err := net.Listen("tcp", ":0") //nolint:gosec
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close() //nolint:errcheck
+
+ addr := l.Addr().String()
+ hs, err := healthcheck.NewHealthServer(addr, nil, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ go func() {
+ err := hs.ListenAndServe()
+ if !errors.Is(err, http.ErrServerClosed) {
+ log.Fatal(err)
+ }
+ }()
+
+ t.Cleanup(func() {
+ err := hs.Shutdown(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+ return hs
+}
+
+func TestHealthCheckServer_Returns404OnMissingHostname(t *testing.T) {
+ t.Parallel()
+
+ hs := newTestHealthServer(t)
+ hs.UpstreamsForHost = getUpstreamsForHost
+ hs.NginxUpstreams = getUpstreamsFromNGINXAllUp
+
+ resp, err := http.Get(hs.URL + "probe/") //nolint:noctx
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusNotFound {
+ t.Error(resp.StatusCode)
+ }
+}
+
+func TestHealthCheckServer_ReturnsCorrectStatsForHostnameOnAllPeersUp(t *testing.T) {
+ t.Parallel()
+
+ hs := newTestHealthServer(t)
+ hs.UpstreamsForHost = getUpstreamsForHost
+ hs.NginxUpstreams = getUpstreamsFromNGINXAllUp
+
+ resp, err := http.Get(hs.URL + "probe/bar.tea.com") //nolint:noctx
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal(resp.StatusCode)
+ }
+
+ want := healthcheck.HostStats{
+ Total: 3,
+ Up: 3,
+ Unhealthy: 0,
+ }
+ var got healthcheck.HostStats
+ if err := json.NewDecoder(resp.Body).Decode(&got); err != nil {
+ t.Fatal(err)
+ }
+ if !cmp.Equal(want, got) {
+ t.Error(cmp.Diff(want, got))
+ }
+}
+
+func TestHealthCheckServer_ReturnsCorrectStatsForHostnameOnAllPeersDown(t *testing.T) {
+ t.Parallel()
+
+ hs := newTestHealthServer(t)
+ hs.UpstreamsForHost = getUpstreamsForHost
+ hs.NginxUpstreams = getUpstreamsFromNGINXAllUnhealthy
+
+ resp, err := http.Get(hs.URL + "probe/bar.tea.com") //nolint:noctx
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusServiceUnavailable {
+ t.Fatal(resp.StatusCode)
+ }
+
+ want := healthcheck.HostStats{
+ Total: 3,
+ Up: 0,
+ Unhealthy: 3,
+ }
+
+ var got healthcheck.HostStats
+ if err := json.NewDecoder(resp.Body).Decode(&got); err != nil {
+ t.Fatal(err)
+ }
+ if !cmp.Equal(want, got) {
+ t.Error(cmp.Diff(want, got))
+ }
+}
+
+func TestHealthCheckServer_ReturnsCorrectStatsForValidHostnameOnPartOfPeersDown(t *testing.T) {
+ t.Parallel()
+
+ hs := newTestHealthServer(t)
+ hs.UpstreamsForHost = getUpstreamsForHost
+ hs.NginxUpstreams = getUpstreamsFromNGINXPartiallyUp
+
+ resp, err := http.Get(hs.URL + "probe/bar.tea.com") //nolint:noctx
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal(resp.StatusCode)
+ }
+
+ want := healthcheck.HostStats{
+ Total: 3,
+ Up: 1,
+ Unhealthy: 2,
+ }
+
+ var got healthcheck.HostStats
+ if err := json.NewDecoder(resp.Body).Decode(&got); err != nil {
+ t.Fatal(err)
+ }
+ if !cmp.Equal(want, got) {
+ t.Error(cmp.Diff(want, got))
+ }
+}
+
+func TestHealthCheckServer_RespondsWith404OnNotExistingHostname(t *testing.T) {
+ t.Parallel()
+
+ hs := newTestHealthServer(t)
+ hs.UpstreamsForHost = getUpstreamsForHost
+ hs.NginxUpstreams = getUpstreamsFromNGINXNotExistingHost
+
+ resp, err := http.Get(hs.URL + "probe/foo.mocha.com") //nolint:noctx
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusNotFound {
+ t.Error(resp.StatusCode)
+ }
+}
+
+func TestHealthCheckServer_RespondsWith500OnErrorFromNGINXAPI(t *testing.T) {
+ t.Parallel()
+
+ hs := newTestHealthServer(t)
+ hs.UpstreamsForHost = getUpstreamsForHost
+ hs.NginxUpstreams = getUpstreamsFromNGINXErrorFromAPI
+
+ resp, err := http.Get(hs.URL + "probe/foo.tea.com") //nolint:noctx
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusInternalServerError {
+ t.Error(resp.StatusCode)
+ }
+}
+
+// getUpstreamsForHost is a helper func faking response from IC.
+func getUpstreamsForHost(host string) []string {
+ upstreams := map[string][]string{
+ "foo.tea.com": {"upstream1", "upstream2"},
+ "bar.tea.com": {"upstream1"},
+ }
+ u, ok := upstreams[host]
+ if !ok {
+ return []string{}
+ }
+ return u
+}
+
+// getUpstreamsFromNGINXAllUp is a helper func used
+// for faking response data from NGINX API. It responds
+// with all upstreams and 'peers' in 'Up' state.
+//
+// Upstreams retrieved using NGINX API client:
+// foo.tea.com -> upstream1, upstream2
+// bar.tea.com -> upstream1
+func getUpstreamsFromNGINXAllUp() (*client.Upstreams, error) {
+ ups := client.Upstreams{
+ "upstream1": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Up"},
+ {State: "Up"},
+ {State: "Up"},
+ },
+ },
+ "upstream2": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Up"},
+ {State: "Up"},
+ {State: "Up"},
+ },
+ },
+ "upstream3": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Up"},
+ {State: "Up"},
+ {State: "Up"},
+ },
+ },
+ }
+ return &ups, nil
+}
+
+// getUpstreamsFromNGINXAllUnhealthy is a helper func used
+// for faking response data from NGINX API. It responds
+// with all upstreams and 'peers' in 'Down' (Unhealthy) state.
+//
+// Upstreams retrieved using NGINX API client:
+// foo.tea.com -> upstream1, upstream2
+// bar.tea.com -> upstream1
+func getUpstreamsFromNGINXAllUnhealthy() (*client.Upstreams, error) {
+ ups := client.Upstreams{
+ "upstream1": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Down"},
+ {State: "Down"},
+ {State: "Down"},
+ },
+ },
+ "upstream2": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Down"},
+ {State: "Down"},
+ {State: "Down"},
+ },
+ },
+ "upstream3": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Down"},
+ {State: "Down"},
+ {State: "Down"},
+ },
+ },
+ }
+ return &ups, nil
+}
+
+// getUpstreamsFromNGINXPartiallyUp is a helper func used
+// for faking response data from NGINX API. It responds
+// with some upstreams and 'peers' in 'Down' (Unhealthy) state,
+// and some upstreams and 'peers' in 'Up' state.
+//
+// Upstreams retrieved using NGINX API client
+// foo.tea.com -> upstream1, upstream2
+// bar.tea.com -> upstream1
+func getUpstreamsFromNGINXPartiallyUp() (*client.Upstreams, error) {
+ ups := client.Upstreams{
+ "upstream1": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Down"},
+ {State: "Down"},
+ {State: "Up"},
+ },
+ },
+ "upstream2": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Down"},
+ {State: "Down"},
+ {State: "Up"},
+ },
+ },
+ "upstream3": client.Upstream{
+ Peers: []client.Peer{
+ {State: "Down"},
+ {State: "Up"},
+ {State: "Down"},
+ },
+ },
+ }
+ return &ups, nil
+}
+
+// getUpstreamsFromNGINXNotExistingHost is a helper func used
+// for faking response data from NGINX API. It responds
+// with empty upstreams on a request for not existing host.
+func getUpstreamsFromNGINXNotExistingHost() (*client.Upstreams, error) {
+ ups := client.Upstreams{}
+ return &ups, nil
+}
+
+// getUpstreamsFromNGINXErrorFromAPI is a helper func used
+// for faking err response from NGINX API client.
+func getUpstreamsFromNGINXErrorFromAPI() (*client.Upstreams, error) {
+ return nil, errors.New("nginx api error")
+}
diff --git a/tests/data/common/service/loadbalancer-with-additional-ports.yaml b/tests/data/common/service/loadbalancer-with-additional-ports.yaml
index 669b50b2a1..57b311a835 100644
--- a/tests/data/common/service/loadbalancer-with-additional-ports.yaml
+++ b/tests/data/common/service/loadbalancer-with-additional-ports.yaml
@@ -23,5 +23,9 @@ spec:
targetPort: 9113
protocol: TCP
name: exporter
+ - port: 9114
+ targetPort: 9114
+ protocol: TCP
+ name: service-insight
selector:
app: nginx-ingress
diff --git a/tests/data/common/service/nodeport-with-additional-ports.yaml b/tests/data/common/service/nodeport-with-additional-ports.yaml
index 92918b2617..9d845b2ed4 100644
--- a/tests/data/common/service/nodeport-with-additional-ports.yaml
+++ b/tests/data/common/service/nodeport-with-additional-ports.yaml
@@ -30,5 +30,9 @@ spec:
targetPort: 3334
protocol: UDP
name: udp-server
+ - port: 9114
+ targetPort: 9114
+ protocol: TCP
+ name: service-insight
selector:
app: nginx-ingress
diff --git a/tests/data/service-insight/app.yaml b/tests/data/service-insight/app.yaml
new file mode 100644
index 0000000000..7a6beecb9e
--- /dev/null
+++ b/tests/data/service-insight/app.yaml
@@ -0,0 +1,19 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: backend1
+spec:
+ replicas: 5
+ selector:
+ matchLabels:
+ app: backend1
+ template:
+ metadata:
+ labels:
+ app: backend1
+ spec:
+ containers:
+ - name: backend1
+ image: nginxdemos/nginx-hello:plain-text
+ ports:
+ - containerPort: 8080
diff --git a/tests/data/service-insight/secret.yaml b/tests/data/service-insight/secret.yaml
new file mode 100644
index 0000000000..dd72533ed6
--- /dev/null
+++ b/tests/data/service-insight/secret.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+ namespace: nginx-ingress
+type: kubernetes.io/tls
+data:
+ tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
+ tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
diff --git a/tests/suite/fixtures/fixtures.py b/tests/suite/fixtures/fixtures.py
index 1d6ae87733..ea79810c6d 100644
--- a/tests/suite/fixtures/fixtures.py
+++ b/tests/suite/fixtures/fixtures.py
@@ -85,6 +85,7 @@ def __init__(
metrics_port=9113,
tcp_server_port=3333,
udp_server_port=3334,
+ service_insight_port=9114,
):
self.public_ip = public_ip
self.port = port
@@ -93,6 +94,7 @@ def __init__(
self.metrics_port = metrics_port
self.tcp_server_port = tcp_server_port
self.udp_server_port = udp_server_port
+ self.service_insight_port = service_insight_port
class IngressControllerPrerequisites:
@@ -172,10 +174,18 @@ def ingress_controller_endpoint(cli_arguments, kube_apis, ingress_controller_pre
namespace,
f"{TEST_DATA}/common/service/nodeport-with-additional-ports.yaml",
)
- port, port_ssl, api_port, metrics_port, tcp_server_port, udp_server_port = get_service_node_ports(
- kube_apis.v1, service_name, namespace
+ (
+ port,
+ port_ssl,
+ api_port,
+ metrics_port,
+ tcp_server_port,
+ udp_server_port,
+ service_insight_port,
+ ) = get_service_node_ports(kube_apis.v1, service_name, namespace)
+ return PublicEndpoint(
+ public_ip, port, port_ssl, api_port, metrics_port, tcp_server_port, udp_server_port, service_insight_port
)
- return PublicEndpoint(public_ip, port, port_ssl, api_port, metrics_port, tcp_server_port, udp_server_port)
else:
create_service_from_yaml(
kube_apis.v1,
diff --git a/tests/suite/test_virtual_server_service_insight.py b/tests/suite/test_virtual_server_service_insight.py
new file mode 100644
index 0000000000..bbda23c107
--- /dev/null
+++ b/tests/suite/test_virtual_server_service_insight.py
@@ -0,0 +1,134 @@
+from unittest import mock
+
+import pytest
+import requests
+from settings import TEST_DATA
+from suite.utils.resources_utils import (
+ create_secret_from_yaml,
+ delete_secret,
+ ensure_response_from_backend,
+ patch_deployment_from_yaml,
+ wait_before_test,
+)
+from suite.utils.yaml_utils import get_first_host_from_yaml
+
+
+@pytest.mark.vs
+@pytest.mark.skip_for_nginx_oss
+@pytest.mark.parametrize(
+ "crd_ingress_controller, virtual_server_setup",
+ [
+ (
+ {"type": "complete", "extra_args": [f"-enable-custom-resources", f"-enable-service-insight"]},
+ {"example": "virtual-server", "app_type": "simple"},
+ )
+ ],
+ indirect=True,
+)
+class TestHealthCheckVsHttp:
+ def test_responses_svc_insight_http(
+ self, request, kube_apis, crd_ingress_controller, virtual_server_setup, ingress_controller_endpoint
+ ):
+ """test responses from service insight endpoint with http"""
+ retry = 0
+ resp = mock.Mock()
+ resp.json.return_value = {}
+ resp.status_code = 502
+ vs_source = f"{TEST_DATA}/virtual-server/standard/virtual-server.yaml"
+ host = get_first_host_from_yaml(vs_source)
+ req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/{host}"
+ ensure_response_from_backend(req_url, virtual_server_setup.vs_host)
+ while (resp.json() != {"Total": 3, "Up": 3, "Unhealthy": 0}) and retry < 5:
+ resp = requests.get(req_url)
+ wait_before_test()
+ retry += 1
+
+ assert resp.status_code == 200, f"Expected 200 code for /probe/{host} but got {resp.status_code}"
+ assert resp.json() == {"Total": 3, "Up": 3, "Unhealthy": 0}
+
+
+@pytest.fixture(scope="class")
+def https_secret_setup(request, kube_apis, test_namespace):
+ print("------------------------- Deploy Secret -----------------------------------")
+ secret_name = create_secret_from_yaml(kube_apis.v1, "nginx-ingress", f"{TEST_DATA}/service-insight/secret.yaml")
+
+ def fin():
+ delete_secret(kube_apis.v1, secret_name, "nginx-ingress")
+
+ request.addfinalizer(fin)
+
+
+@pytest.mark.vs
+@pytest.mark.skip_for_nginx_oss
+@pytest.mark.parametrize(
+ "crd_ingress_controller, virtual_server_setup",
+ [
+ (
+ {
+ "type": "complete",
+ "extra_args": [
+ f"-enable-custom-resources",
+ f"-enable-service-insight",
+ f"-service-insight-tls-secret=nginx-ingress/test-secret",
+ ],
+ },
+ {"example": "virtual-server", "app_type": "simple"},
+ )
+ ],
+ indirect=True,
+)
+class TestHealthCheckVsHttps:
+ def test_responses_svc_insight_https(
+ self,
+ request,
+ kube_apis,
+ https_secret_setup,
+ ingress_controller_endpoint,
+ crd_ingress_controller,
+ virtual_server_setup,
+ ):
+ """test responses from service insight endpoint with https"""
+ retry = 0
+ resp = mock.Mock()
+ resp.json.return_value = {}
+ resp.status_code = 502
+ vs_source = f"{TEST_DATA}/virtual-server/standard/virtual-server.yaml"
+ host = get_first_host_from_yaml(vs_source)
+ req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/{host}"
+ ensure_response_from_backend(req_url, virtual_server_setup.vs_host)
+ while (resp.json() != {"Total": 3, "Up": 3, "Unhealthy": 0}) and retry < 5:
+ resp = requests.get(req_url, verify=False)
+ wait_before_test()
+ retry += 1
+ assert resp.status_code == 200, f"Expected 200 code for /probe/{host} but got {resp.status_code}"
+ assert resp.json() == {"Total": 3, "Up": 3, "Unhealthy": 0}
+
+ def test_responses_svc_insight_update_pods(
+ self,
+ request,
+ kube_apis,
+ https_secret_setup,
+ ingress_controller_endpoint,
+ test_namespace,
+ crd_ingress_controller,
+ virtual_server_setup,
+ ):
+ """test responses from service insight endpoint with https and update number of replicas"""
+ retry = 0
+ resp = mock.Mock()
+ resp.json.return_value = {}
+ resp.status_code = 502
+ vs_source = f"{TEST_DATA}/virtual-server/standard/virtual-server.yaml"
+ host = get_first_host_from_yaml(vs_source)
+ req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/{host}"
+ ensure_response_from_backend(req_url, virtual_server_setup.vs_host)
+
+ # patch backend1 deployment with 5 replicas
+ patch_deployment_from_yaml(kube_apis.apps_v1_api, test_namespace, f"{TEST_DATA}/service-insight/app.yaml")
+ ensure_response_from_backend(req_url, virtual_server_setup.vs_host)
+ while (resp.json() != {"Total": 6, "Up": 6, "Unhealthy": 0}) and retry < 5:
+ resp = requests.get(req_url, verify=False)
+ wait_before_test()
+ retry += 1
+ assert resp.status_code == 200, f"Expected 200 code for /probe/{host} but got {resp.status_code}"
+ assert resp.json() == {"Total": 6, "Up": 6, "Unhealthy": 0}
diff --git a/tests/suite/utils/resources_utils.py b/tests/suite/utils/resources_utils.py
index e88cfe225e..42c1421a7c 100644
--- a/tests/suite/utils/resources_utils.py
+++ b/tests/suite/utils/resources_utils.py
@@ -424,7 +424,7 @@ def create_service_with_name(v1: CoreV1Api, namespace, name) -> str:
return create_service(v1, namespace, dep)
-def get_service_node_ports(v1: CoreV1Api, name, namespace) -> (int, int, int, int, int, int):
+def get_service_node_ports(v1: CoreV1Api, name, namespace) -> (int, int, int, int, int, int, int):
"""
Get service allocated node_ports.
@@ -434,10 +434,11 @@ def get_service_node_ports(v1: CoreV1Api, name, namespace) -> (int, int, int, in
- :return: (plain_port, ssl_port, api_port, exporter_port)
+ :return: (plain_port, ssl_port, api_port, exporter_port, tcp_server_port, udp_server_port, service_insight_port)
"""
resp = v1.read_namespaced_service(name, namespace)
- if len(resp.spec.ports) == 6:
+ if len(resp.spec.ports) == 7:
print("An unexpected amount of ports in a service. Check the configuration")
print(f"Service with an API port: {resp.spec.ports[2].node_port}")
print(f"Service with an Exporter port: {resp.spec.ports[3].node_port}")
+ print(f"Service with an Service Insight port: {resp.spec.ports[6].node_port}")
return (
resp.spec.ports[0].node_port,
resp.spec.ports[1].node_port,
@@ -445,6 +446,7 @@ def get_service_node_ports(v1: CoreV1Api, name, namespace) -> (int, int, int, in
resp.spec.ports[3].node_port,
resp.spec.ports[4].node_port,
resp.spec.ports[5].node_port,
+ resp.spec.ports[6].node_port,
)