From a29f7c92e9ca11b58dda0201a48331ec49f0295f Mon Sep 17 00:00:00 2001
From: Jakub Jarosz <99677300+jjngx@users.noreply.github.com>
Date: Wed, 4 Jan 2023 15:23:07 +0000
Subject: [PATCH] Add healthcheck for transport server (#3361)

Add healthcheck for Transport Server
---
 .../logging-and-monitoring/service-insight.md |  18 +-
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 internal/configs/configurator.go              |  62 +++--
 internal/configs/configurator_test.go         | 118 ++++++++++
 internal/configs/transportserver.go           |  10 +-
 internal/configs/virtualserver.go             |   7 -
 internal/healthcheck/healthcheck.go           |  89 ++++++--
 internal/healthcheck/healthcheck_test.go      | 216 +++++++++++++++++-
 .../test_transport_server_service_insight.py  | 203 ++++++++++++++++
 .../test_virtual_server_service_insight.py    |   4 +-
 11 files changed, 680 insertions(+), 53 deletions(-)
 create mode 100644 tests/suite/test_transport_server_service_insight.py

diff --git a/docs/content/logging-and-monitoring/service-insight.md b/docs/content/logging-and-monitoring/service-insight.md
index f55ee5a5f8..9073e633b1 100644
--- a/docs/content/logging-and-monitoring/service-insight.md
+++ b/docs/content/logging-and-monitoring/service-insight.md
@@ -11,7 +11,7 @@ docs: "DOCS-000"
 ---
 
-The Ingress Controller exposes an endpoint and provides host statistics for Virtual Servers (VS).
+The Ingress Controller exposes an endpoint that provides host statistics for Virtual Servers (VS) and Transport Servers (TS).
 It exposes data in JSON format and returns HTTP status codes.
 The response body holds information about the total, up, and unhealthy numbers of upstreams associated with the hostname.
@@ -20,14 +20,14 @@ Returned HTTP codes indicate the health of the upstreams (service).
 The service is not healthy (HTTP response code other than 200 OK) if all upstreams are unhealthy.
 The service is healthy if at least one upstream is healthy; in this case, the endpoint returns HTTP code 200 OK.
-
-
 ## Enabling Service Insight Endpoint
 
 If you're using *Kubernetes manifests* (Deployment or DaemonSet) to install the Ingress Controller, take the following steps to enable the Service Insight endpoint:
-1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This will expose the Ingress Controller endpoint via the path `/probe/{hostname}` on port `9114` (customizable with the `-service-insight-listen-port` command-line argument).
+
+1. Run the Ingress Controller with the `-enable-service-insight` [command-line argument](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments). This exposes the endpoint via the paths `/probe/{hostname}` for Virtual Servers and `/probe/ts/{service_name}` for Transport Servers on port `9114` (customizable with the `-service-insight-listen-port` command-line argument). The `service_name` parameter is the name of the deployed service (the service specified under `upstreams` in the TransportServer resource).
 1. To enable TLS for the Service Insight endpoint, configure the `-service-insight-tls-secret` command-line argument with the namespace and name of a TLS Secret.
 1. Add the Service Insight port to the list of the ports of the Ingress Controller container in the template of the Ingress Controller pod:
+    ```yaml
+    - name: service-insight
+      containerPort: 9114
+    ```
@@ -39,9 +39,9 @@ If you're using *Helm* to install the Ingress Controller, to enable Service Insi
 
 The Service Insight provides the following statistics:
 
-* Total number of VS
-* Number of VS in 'Down' state
-* Number of VS in 'Healthy' state
+* Total number of upstreams associated with the VS or TS
+* Number of upstreams in the 'Up' state
+* Number of upstreams in the 'Unhealthy' state
 
 These statistics are returned as JSON:
 
@@ -52,7 +52,7 @@ These statistics are returned as JSON:
 Response codes:
 
 * HTTP 200 OK - Service is healthy
-* HTTP 404 - No upstreams/VS found for the requested hostname
-* HTTP 503 Service Unavailable - The service is down (All upstreams/VS are "Unhealthy")
+* HTTP 404 Not Found - No upstreams/VS/TS found for the requested hostname/name
+* HTTP 418 I'm a teapot - The service is down (all upstreams/VS/TS are "Unhealthy")
 
 **Note**: wildcards in hostnames are not supported at the moment.
diff --git a/go.mod b/go.mod
index b6525961cb..5b86cbc6b5 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/config v1.18.7
 	github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.13.26
 	github.com/cert-manager/cert-manager v1.10.1
-	github.com/go-chi/chi v1.5.4
+	github.com/go-chi/chi/v5 v5.0.8
 	github.com/golang-jwt/jwt/v4 v4.4.3
 	github.com/golang/glog v1.0.0
 	github.com/google/go-cmp v0.5.9
diff --git a/go.sum b/go.sum
index 07c30098ce..e81ac09e60 100644
--- a/go.sum
+++ b/go.sum
@@ -108,8 +108,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
 github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs=
-github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg=
+github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
+github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
diff --git a/internal/configs/configurator.go b/internal/configs/configurator.go
index ff54420919..bf398cc044 100644
--- a/internal/configs/configurator.go
+++ b/internal/configs/configurator.go
@@ -111,6 +111,7 @@ type Configurator struct {
 	ingresses map[string]*IngressEx
 	minions map[string]map[string]bool
 	virtualServers map[string]*VirtualServerEx
+	transportServers map[string]*TransportServerEx
 	tlsPassthroughPairs map[string]tlsPassthroughPair
 	isWildcardEnabled bool
 	isPlus bool
@@ -145,6 +146,7 @@ func NewConfigurator(nginxManager nginx.Manager, staticCfgParams *StaticConfigPa
 		cfgParams: config,
 		ingresses: make(map[string]*IngressEx),
 		virtualServers: make(map[string]*VirtualServerEx),
+		transportServers: make(map[string]*TransportServerEx),
 		templateExecutor: templateExecutor,
 		templateExecutorV2: templateExecutorV2,
 		minions: make(map[string]map[string]bool),
@@ -265,8 +267,8 @@ func (cnf *Configurator) AddOrUpdateIngress(ingEx *IngressEx) (Warnings, error)
 	return warnings, nil
 }
 
-// GetVirtualServerForHost takes a hostname and returns a VS for the given hostname.
-func (cnf *Configurator) GetVirtualServerForHost(hostname string) *conf_v1.VirtualServer {
+// virtualServerForHost takes a hostname and returns the VS for the given hostname.
+func (cnf *Configurator) virtualServerForHost(hostname string) *conf_v1.VirtualServer {
 	for _, vsEx := range cnf.virtualServers {
 		if vsEx.VirtualServer.Spec.Host == hostname {
 			return vsEx.VirtualServer
@@ -275,8 +277,8 @@ func (cnf *Configurator) GetVirtualServerForHost(hostname string) *conf_v1.Virtu
 	return nil
 }
 
-// GetUpstreamsforVirtualServer takes VS and returns a slice of upstreams.
-func (cnf *Configurator) GetUpstreamsforVirtualServer(vs *conf_v1.VirtualServer) []string {
+// upstreamsForVirtualServer takes a VirtualServer and returns a list of associated upstreams.
+func (cnf *Configurator) upstreamsForVirtualServer(vs *conf_v1.VirtualServer) []string {
 	glog.V(3).Infof("Get upstreamName for vs: %s", vs.Spec.Host)
 	upstreamNames := make([]string, 0, len(vs.Spec.Upstreams))
 
@@ -290,18 +292,52 @@ func (cnf *Configurator) GetUpstreamsforVirtualServer(vs *conf_v1.VirtualServer)
 	return upstreamNames
 }
 
-// GetUpstreamsforHost takes a hostname and returns a slice of upstreams
-// for the given hostname.
-func (cnf *Configurator) GetUpstreamsforHost(hostname string) []string {
+// UpstreamsForHost takes a hostname and returns a list of upstreams associated with it.
+func (cnf *Configurator) UpstreamsForHost(hostname string) []string {
 	glog.V(3).Infof("Get upstream for host: %s", hostname)
-	vs := cnf.GetVirtualServerForHost(hostname)
-
+	vs := cnf.virtualServerForHost(hostname)
 	if vs != nil {
-		return cnf.GetUpstreamsforVirtualServer(vs)
+		return cnf.upstreamsForVirtualServer(vs)
+	}
+	return nil
+}
+
+// StreamUpstreamsForName takes a name and returns the stream upstreams
+// associated with it. The name is the action name (Spec.Action.Pass)
+// of a TransportServer (TS).
+func (cnf *Configurator) StreamUpstreamsForName(name string) []string {
+	glog.V(3).Infof("Get stream upstreams for name: '%s'", name)
+	ts := cnf.transportServerForActionName(name)
+	if ts != nil {
+		return cnf.streamUpstreamsForTransportServer(ts)
+	}
+	return nil
+}
+
+// transportServerForActionName takes an action name and returns the
+// TransportServer associated with that name.
+func (cnf *Configurator) transportServerForActionName(name string) *conf_v1alpha1.TransportServer {
+	for _, tsEx := range cnf.transportServers {
+		glog.V(3).Infof("Check ts action '%s' for requested name: '%s'", tsEx.TransportServer.Spec.Action.Pass, name)
+		if tsEx.TransportServer.Spec.Action.Pass == name {
+			return tsEx.TransportServer
+		}
 	}
 	return nil
 }
 
+// streamUpstreamsForTransportServer takes a TransportServer and returns
+// a list of the stream upstreams associated with it.
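+// For illustration (based on the "ts_<namespace>_<name>" prefix used by
+// newUpstreamNamerForTransportServer): a TransportServer named "secure-app"
+// in namespace "default" with an upstream "secure-app" yields the stream
+// upstream name "ts_default_secure-app_secure-app".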
+func (cnf *Configurator) streamUpstreamsForTransportServer(ts *conf_v1alpha1.TransportServer) []string { + upstreamNames := make([]string, 0, len(ts.Spec.Upstreams)) + n := newUpstreamNamerForTransportServer(ts) + for _, u := range ts.Spec.Upstreams { + un := n.GetNameForUpstream(u.Name) + upstreamNames = append(upstreamNames, un) + } + return upstreamNames +} + func (cnf *Configurator) addOrUpdateIngress(ingEx *IngressEx) (Warnings, error) { apResources := cnf.updateApResources(ingEx) @@ -489,8 +525,7 @@ func (cnf *Configurator) AddOrUpdateVirtualServer(virtualServerEx *VirtualServer } func (cnf *Configurator) addOrUpdateOpenTracingTracerConfig(content string) error { - err := cnf.nginxManager.CreateOpenTracingTracerConfig(content) - return err + return cnf.nginxManager.CreateOpenTracingTracerConfig(content) } func (cnf *Configurator) addOrUpdateVirtualServer(virtualServerEx *VirtualServerEx) (Warnings, error) { @@ -632,9 +667,10 @@ func (cnf *Configurator) addOrUpdateTransportServer(transportServerEx *Transport if cnf.isPlus && cnf.isPrometheusEnabled { cnf.updateTransportServerMetricsLabels(transportServerEx, tsCfg.Upstreams) } - cnf.nginxManager.CreateStreamConfig(name, content) + cnf.transportServers[name] = transportServerEx + // update TLS Passthrough Hosts config in case we have a TLS Passthrough TransportServer // only TLS Passthrough TransportServers have non-empty hosts if transportServerEx.TransportServer.Spec.Host != "" { diff --git a/internal/configs/configurator_test.go b/internal/configs/configurator_test.go index 42a8648392..b6ad8ec1bc 100644 --- a/internal/configs/configurator_test.go +++ b/internal/configs/configurator_test.go @@ -1281,3 +1281,121 @@ func TestUpdateApResourcesForVs(t *testing.T) { } } } + +func TestUpstreamsForHost_ReturnsNilForNoVirtualServers(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.virtualServers = map[string]*VirtualServerEx{ + "vs": invalidVirtualServerEx, + } + + got := tcnf.UpstreamsForHost("tea.example.com") + if got != nil { + t.Errorf("want nil, got %+v", got) + } +} + +func TestUpstreamsForHost_DoesNotReturnUpstreamsOnBogusHostname(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.virtualServers = map[string]*VirtualServerEx{ + "vs": validVirtualServerExWithUpstreams, + } + + got := tcnf.UpstreamsForHost("bogus.host.org") + if got != nil { + t.Errorf("want nil, got %+v", got) + } +} + +func TestUpstreamsForHost_ReturnsUpstreamsNamesForValidHostname(t *testing.T) { + t.Parallel() + tcnf := createTestConfigurator(t) + tcnf.virtualServers = map[string]*VirtualServerEx{ + "vs": validVirtualServerExWithUpstreams, + } + + want := []string{"vs_default_test-vs_tea-app"} + got := tcnf.UpstreamsForHost("tea.example.com") + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +func TestStreamUpstreamsForName_DoesNotReturnUpstreamsForBogusName(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.transportServers = map[string]*TransportServerEx{ + "ts": validTransportServerExWithUpstreams, + } + + got := tcnf.StreamUpstreamsForName("bogus-service-name") + if got != nil { + t.Errorf("want nil, got %+v", got) + } +} + +func TestStreamUpstreamsForName_ReturnsStreamUpstreamsNamesOnValidServiceName(t *testing.T) { + t.Parallel() + + tcnf := createTestConfigurator(t) + tcnf.transportServers = map[string]*TransportServerEx{ + "ts": validTransportServerExWithUpstreams, + } + + want := []string{"ts_default_secure-app_secure-app"} + got := 
tcnf.StreamUpstreamsForName("secure-app") + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +var ( + invalidVirtualServerEx = &VirtualServerEx{ + VirtualServer: &conf_v1.VirtualServer{}, + } + validVirtualServerExWithUpstreams = &VirtualServerEx{ + VirtualServer: &conf_v1.VirtualServer{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "test-vs", + Namespace: "default", + }, + Spec: conf_v1.VirtualServerSpec{ + Host: "tea.example.com", + Upstreams: []conf_v1.Upstream{ + { + Name: "tea-app", + }, + }, + }, + }, + } + validTransportServerExWithUpstreams = &TransportServerEx{ + TransportServer: &conf_v1alpha1.TransportServer{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "secure-app", + Namespace: "default", + }, + Spec: conf_v1alpha1.TransportServerSpec{ + Listener: conf_v1alpha1.TransportServerListener{ + Name: "tls-passthrough", + Protocol: "TLS_PASSTHROUGH", + }, + Host: "example.com", + Upstreams: []conf_v1alpha1.Upstream{ + { + Name: "secure-app", + Service: "secure-app", + Port: 8443, + }, + }, + Action: &conf_v1alpha1.Action{ + Pass: "secure-app", + }, + }, + }, + } +) diff --git a/internal/configs/transportserver.go b/internal/configs/transportserver.go index b1db784a38..ba7d0b9e3a 100644 --- a/internal/configs/transportserver.go +++ b/internal/configs/transportserver.go @@ -24,14 +24,18 @@ func (tsEx *TransportServerEx) String() string { if tsEx == nil { return "" } - if tsEx.TransportServer == nil { return "TransportServerEx has no TransportServer" } - return fmt.Sprintf("%s/%s", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name) } +func newUpstreamNamerForTransportServer(transportServer *conf_v1alpha1.TransportServer) *upstreamNamer { + return &upstreamNamer{ + prefix: fmt.Sprintf("ts_%s_%s", transportServer.Namespace, transportServer.Name), + } +} + // generateTransportServerConfig generates a full configuration for a TransportServer. 
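+// It names upstreams with newUpstreamNamerForTransportServer and returns the
+// generated config together with any warnings produced while building it.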
func generateTransportServerConfig(transportServerEx *TransportServerEx, listenerPort int, isPlus bool, isResolverConfigured bool) (*version2.TransportServerConfig, Warnings) { upstreamNamer := newUpstreamNamerForTransportServer(transportServerEx.TransportServer) @@ -105,7 +109,6 @@ func generateUnixSocket(transportServerEx *TransportServerEx) string { if transportServerEx.TransportServer.Spec.Listener.Name == conf_v1alpha1.TLSPassthroughListenerName { return fmt.Sprintf("unix:/var/lib/nginx/passthrough-%s_%s.sock", transportServerEx.TransportServer.Namespace, transportServerEx.TransportServer.Name) } - return "" } @@ -172,7 +175,6 @@ func generateTransportServerHealthCheck(upstreamName string, generatedUpstreamNa break } } - return hc, match } diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go index da05e3b787..46c37caa99 100644 --- a/internal/configs/virtualserver.go +++ b/internal/configs/virtualserver.go @@ -8,7 +8,6 @@ import ( "github.com/golang/glog" "github.com/nginxinc/kubernetes-ingress/internal/k8s/secrets" "github.com/nginxinc/kubernetes-ingress/internal/nginx" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" api_v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" @@ -158,12 +157,6 @@ func NewUpstreamNamerForVirtualServerRoute(virtualServer *conf_v1.VirtualServer, } } -func newUpstreamNamerForTransportServer(transportServer *conf_v1alpha1.TransportServer) *upstreamNamer { - return &upstreamNamer{ - prefix: fmt.Sprintf("ts_%s_%s", transportServer.Namespace, transportServer.Name), - } -} - func (namer *upstreamNamer) GetNameForUpstreamFromAction(action *conf_v1.Action) string { var upstream string if action.Proxy != nil && action.Proxy.Upstream != "" { diff --git a/internal/healthcheck/healthcheck.go b/internal/healthcheck/healthcheck.go index b25fe58bf7..5c74d8d7a5 100644 --- a/internal/healthcheck/healthcheck.go +++ b/internal/healthcheck/healthcheck.go @@ -14,7 +14,7 @@ import ( v1 "k8s.io/api/core/v1" - "github.com/go-chi/chi" + "github.com/go-chi/chi/v5" "github.com/golang/glog" "github.com/nginxinc/kubernetes-ingress/internal/configs" "github.com/nginxinc/nginx-plus-go-client/client" @@ -35,10 +35,12 @@ func RunHealthCheck(port int, plusClient *client.NginxClient, cnf *configs.Confi // HealthServer holds data required for running // the healthcheck server. type HealthServer struct { - Server *http.Server - URL string - UpstreamsForHost func(host string) []string - NginxUpstreams func() (*client.Upstreams, error) + Server *http.Server + URL string + UpstreamsForHost func(host string) []string + NginxUpstreams func() (*client.Upstreams, error) + StreamUpstreamsForName func(host string) []string + NginxStreamUpstreams func() (*client.StreamUpstreams, error) } // NewHealthServer creates Health Server. 
If secret is provided, @@ -50,9 +52,11 @@ func NewHealthServer(addr string, nc *client.NginxClient, cnf *configs.Configura ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, }, - URL: fmt.Sprintf("http://%s/", addr), - UpstreamsForHost: cnf.GetUpstreamsforHost, - NginxUpstreams: nc.GetUpstreams, + URL: fmt.Sprintf("http://%s/", addr), + UpstreamsForHost: cnf.UpstreamsForHost, + NginxUpstreams: nc.GetUpstreams, + StreamUpstreamsForName: cnf.StreamUpstreamsForName, + NginxStreamUpstreams: nc.GetStreamUpstreams, } if secret != nil { @@ -72,7 +76,8 @@ func NewHealthServer(addr string, nc *client.NginxClient, cnf *configs.Configura // ListenAndServe starts healthcheck server. func (hs *HealthServer) ListenAndServe() error { mux := chi.NewRouter() - mux.Get("/probe/{hostname}", hs.Retrieve) + mux.Get("/probe/{hostname}", hs.UpstreamStats) + mux.Get("/probe/ts/{name}", hs.StreamStats) hs.Server.Handler = mux if hs.Server.TLSConfig != nil { return hs.Server.ListenAndServeTLS("", "") @@ -85,8 +90,8 @@ func (hs *HealthServer) Shutdown(ctx context.Context) error { return hs.Server.Shutdown(ctx) } -// Retrieve finds health stats for the host identified by a hostname in the request URL. -func (hs *HealthServer) Retrieve(w http.ResponseWriter, r *http.Request) { +// UpstreamStats calculates health stats for the host identified by the hostname in the request URL. +func (hs *HealthServer) UpstreamStats(w http.ResponseWriter, r *http.Request) { hostname := chi.URLParam(r, "hostname") host := sanitize(hostname) @@ -114,7 +119,7 @@ func (hs *HealthServer) Retrieve(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") switch stats.Up { case 0: - w.WriteHeader(http.StatusServiceUnavailable) + w.WriteHeader(http.StatusTeapot) default: w.WriteHeader(http.StatusOK) } @@ -124,6 +129,43 @@ func (hs *HealthServer) Retrieve(w http.ResponseWriter, r *http.Request) { } } +// StreamStats calculates health stats for the TransportServer(s) +// identified by the service (action) name in the request URL. 
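+// The name must match a TransportServer action (Spec.Action.Pass); for
+// example, a request to /probe/ts/secure-app reports on the TransportServer
+// whose action passes to the "secure-app" upstream.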
+func (hs *HealthServer) StreamStats(w http.ResponseWriter, r *http.Request) { + name := chi.URLParam(r, "name") + n := sanitize(name) + streamUpstreamNames := hs.StreamUpstreamsForName(n) + if len(streamUpstreamNames) == 0 { + glog.Errorf("no stream upstreams for requested name '%s' or name does not exist", n) + w.WriteHeader(http.StatusNotFound) + return + } + streams, err := hs.NginxStreamUpstreams() + if err != nil { + glog.Errorf("error retrieving stream upstreams for requested name: %s", n) + w.WriteHeader(http.StatusInternalServerError) + return + } + stats := countStreamStats(streams, streamUpstreamNames) + data, err := json.Marshal(stats) + if err != nil { + glog.Error("error marshaling result", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + switch stats.Up { + case 0: + w.WriteHeader(http.StatusTeapot) + default: + w.WriteHeader(http.StatusOK) + } + if _, err := w.Write(data); err != nil { + glog.Error("error writing result", err) + http.Error(w, "internal error", http.StatusInternalServerError) + } +} + func sanitize(s string) string { hostname := strings.TrimSpace(s) hostname = strings.ReplaceAll(hostname, "\n", "") @@ -169,11 +211,30 @@ func countStats(upstreams *client.Upstreams, upstreamNames []string) HostStats { up++ } } + return HostStats{ + Total: total, + Up: up, + Unhealthy: total - up, + } +} - unhealthy := total - up +func countStreamStats(streams *client.StreamUpstreams, streamUpstreamNames []string) HostStats { + total, up := 0, 0 + for name, s := range *streams { + if !slices.Contains(streamUpstreamNames, name) { + continue + } + for _, p := range s.Peers { + total++ + if strings.ToLower(p.State) != "up" { + continue + } + up++ + } + } return HostStats{ Total: total, Up: up, - Unhealthy: unhealthy, + Unhealthy: total - up, } } diff --git a/internal/healthcheck/healthcheck_test.go b/internal/healthcheck/healthcheck_test.go index 24ab5aa81b..2090f35f59 100644 --- a/internal/healthcheck/healthcheck_test.go +++ b/internal/healthcheck/healthcheck_test.go @@ -109,7 +109,7 @@ func TestHealthCheckServer_ReturnsCorrectStatsForHostnameOnAllPeersDown(t *testi } defer resp.Body.Close() //nolint:errcheck - if resp.StatusCode != http.StatusServiceUnavailable { + if resp.StatusCode != http.StatusTeapot { t.Fatal(resp.StatusCode) } @@ -196,6 +196,135 @@ func TestHealthCheckServer_RespondsWith500OnErrorFromNGINXAPI(t *testing.T) { } } +func TestHealthCheckServer_Returns404OnMissingTransportServerActionName(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllUp + + resp, err := http.Get(hs.URL + "probe/ts/") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusNotFound { + t.Error(resp.StatusCode) + } +} + +func TestHealthCheckServer_Returns404OnBogusTransportServerActionName(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllUp + + resp, err := http.Get(hs.URL + "probe/ts/bogusname") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusNotFound { + t.Error(resp.StatusCode) + } +} + +func TestHealthCheckServer_ReturnsCorrectTransportServerStatsForNameOnAllPeersUp(t *testing.T) { + t.Parallel() + + hs 
:= newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllUp + + resp, err := http.Get(hs.URL + "probe/ts/foo-app") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusOK { + t.Fatal(resp.StatusCode) + } + + want := healthcheck.HostStats{ + Total: 6, + Up: 6, + Unhealthy: 0, + } + var got healthcheck.HostStats + if err := json.NewDecoder(resp.Body).Decode(&got); err != nil { + t.Fatal(err) + } + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +func TestHealthCheckServer_ReturnsCorrectTransportServerStatsForNameOnSomePeersUpSomeDown(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXPartiallyUp + + resp, err := http.Get(hs.URL + "probe/ts/foo-app") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusOK { + t.Fatal(resp.StatusCode) + } + + want := healthcheck.HostStats{ + Total: 6, + Up: 4, + Unhealthy: 2, + } + var got healthcheck.HostStats + if err := json.NewDecoder(resp.Body).Decode(&got); err != nil { + t.Fatal(err) + } + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + +func TestHealthCheckServer_ReturnsCorrectTransportServerStatsForNameOnAllPeersDown(t *testing.T) { + t.Parallel() + + hs := newTestHealthServer(t) + hs.StreamUpstreamsForName = streamUpstreamsForName + hs.NginxStreamUpstreams = streamUpstreamsFromNGINXAllPeersDown + + resp, err := http.Get(hs.URL + "probe/ts/foo-app") //nolint:noctx + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusTeapot { + t.Fatal(resp.StatusCode) + } + + want := healthcheck.HostStats{ + Total: 6, + Up: 0, + Unhealthy: 6, + } + var got healthcheck.HostStats + if err := json.NewDecoder(resp.Body).Decode(&got); err != nil { + t.Fatal(err) + } + if !cmp.Equal(want, got) { + t.Error(cmp.Diff(want, got)) + } +} + // getUpstreamsForHost is a helper func faking response from IC. func getUpstreamsForHost(host string) []string { upstreams := map[string][]string{ @@ -325,3 +454,88 @@ func getUpstreamsFromNGINXNotExistingHost() (*client.Upstreams, error) { func getUpstreamsFromNGINXErrorFromAPI() (*client.Upstreams, error) { return nil, errors.New("nginx api error") } + +// streamUpstreamsForName is a helper func faking response from IC. +func streamUpstreamsForName(name string) []string { + upstreams := map[string][]string{ + "foo-app": {"streamUpstream1", "streamUpstream2"}, + "bar-app": {"streamUpstream1"}, + } + u, ok := upstreams[name] + if !ok { + return []string{} + } + return u +} + +// streamUpstreamsFromNGINXAllUp is a helper func +// for faking response from NGINX Plus client. +// +//nolint:unparam +func streamUpstreamsFromNGINXAllUp() (*client.StreamUpstreams, error) { + streamUpstreams := client.StreamUpstreams{ + "streamUpstream1": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Up"}, + {State: "Up"}, + {State: "Up"}, + }, + }, + "streamUpstream2": client.StreamUpstream{ + Peers: []client.StreamPeer{ + {State: "Up"}, + {State: "Up"}, + {State: "Up"}, + }, + }, + } + return &streamUpstreams, nil +} + +// streamUpstreamsFromNGINXPartiallyUp is a helper func +// for faking response from NGINX Plus client. 
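+// Two of the six peers are "Down", so a probe across both upstreams is
+// expected to report Total: 6, Up: 4, Unhealthy: 2.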
+//
+//nolint:unparam
+func streamUpstreamsFromNGINXPartiallyUp() (*client.StreamUpstreams, error) {
+	streamUpstreams := client.StreamUpstreams{
+		"streamUpstream1": client.StreamUpstream{
+			Peers: []client.StreamPeer{
+				{State: "Up"},
+				{State: "Down"},
+				{State: "Up"},
+			},
+		},
+		"streamUpstream2": client.StreamUpstream{
+			Peers: []client.StreamPeer{
+				{State: "Down"},
+				{State: "Up"},
+				{State: "Up"},
+			},
+		},
+	}
+	return &streamUpstreams, nil
+}
+
+// streamUpstreamsFromNGINXAllPeersDown is a helper func
+// for faking response from NGINX Plus client.
+//
+//nolint:unparam
+func streamUpstreamsFromNGINXAllPeersDown() (*client.StreamUpstreams, error) {
+	streamUpstreams := client.StreamUpstreams{
+		"streamUpstream1": client.StreamUpstream{
+			Peers: []client.StreamPeer{
+				{State: "Down"},
+				{State: "Down"},
+				{State: "Down"},
+			},
+		},
+		"streamUpstream2": client.StreamUpstream{
+			Peers: []client.StreamPeer{
+				{State: "Down"},
+				{State: "Down"},
+				{State: "Down"},
+			},
+		},
+	}
+	return &streamUpstreams, nil
+}
diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py
new file mode 100644
index 0000000000..9bda5959f3
--- /dev/null
+++ b/tests/suite/test_transport_server_service_insight.py
@@ -0,0 +1,203 @@
+from unittest import mock
+
+import pytest
+import requests
+from settings import TEST_DATA
+from suite.fixtures.fixtures import PublicEndpoint
+from suite.utils.custom_resources_utils import create_ts_from_yaml, delete_ts
+from suite.utils.resources_utils import (
+    create_items_from_yaml,
+    create_secret_from_yaml,
+    delete_items_from_yaml,
+    delete_secret,
+    get_first_pod_name,
+    wait_before_test,
+    wait_until_all_pods_are_ready,
+)
+from suite.utils.ssl_utils import create_sni_session
+from suite.utils.yaml_utils import get_first_host_from_yaml
+
+
+class TransportServerTlsSetup:
+    """
+    Encapsulate Transport Server details.
+
+    Attributes:
+        public_endpoint (object): public IP and port information
+        ts_resource (dict): the TransportServer resource
+        name (str): name of the TransportServer
+        namespace (str): namespace of the TransportServer
+        ts_host (str): host configured in the TransportServer
+    """
+
+    def __init__(self, public_endpoint: PublicEndpoint, ts_resource, name, namespace, ts_host):
+        self.public_endpoint = public_endpoint
+        self.ts_resource = ts_resource
+        self.name = name
+        self.namespace = namespace
+        self.ts_host = ts_host
+
+
+@pytest.fixture(scope="class")
+def transport_server_tls_passthrough_setup(
+    request, kube_apis, test_namespace, ingress_controller_endpoint
+) -> TransportServerTlsSetup:
+    """
+    Prepare Transport Server Example.
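+
+    Deploys the secure-app backend and a TLS passthrough TransportServer from
+    the example's standard manifests; both are removed when the test class completes.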
+ + :param request: internal pytest fixture to parametrize this method + :param kube_apis: client apis + :param test_namespace: namespace for test resources + :param ingress_controller_endpoint: ip and port information + :return TransportServerTlsSetup: + """ + print("------------------------- Deploy Transport Server with tls passthrough -----------------------------------") + # deploy secure_app + secure_app_file = f"{TEST_DATA}/{request.param['example']}/standard/secure-app.yaml" + create_items_from_yaml(kube_apis, secure_app_file, test_namespace) + + # deploy transport server + transport_server_std_src = f"{TEST_DATA}/{request.param['example']}/standard/transport-server.yaml" + ts_resource = create_ts_from_yaml(kube_apis.custom_objects, transport_server_std_src, test_namespace) + ts_host = get_first_host_from_yaml(transport_server_std_src) + wait_until_all_pods_are_ready(kube_apis.v1, test_namespace) + + def fin(): + if request.config.getoption("--skip-fixture-teardown") == "no": + print("Clean up TransportServer and app:") + delete_ts(kube_apis.custom_objects, ts_resource, test_namespace) + delete_items_from_yaml(kube_apis, secure_app_file, test_namespace) + + request.addfinalizer(fin) + + return TransportServerTlsSetup( + ingress_controller_endpoint, + ts_resource, + ts_resource["metadata"]["name"], + test_namespace, + ts_host, + ) + + +@pytest.mark.ts +@pytest.mark.skip_for_nginx_oss +@pytest.mark.parametrize( + "crd_ingress_controller, transport_server_tls_passthrough_setup", + [ + ( + { + "type": "complete", + "extra_args": [ + "-enable-tls-passthrough=true", + "-enable-service-insight", + ], + }, + {"example": "transport-server-tls-passthrough"}, + ) + ], + indirect=True, +) +class TestTransportServerTSServiceInsightHTTP: + def test_ts_service_insight( + self, + kube_apis, + crd_ingress_controller, + transport_server_tls_passthrough_setup, + test_namespace, + ingress_controller_endpoint, + ): + """ + Test Service Insight Endpoint with Transport Server on HTTP port. 
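+
+        Verifies the TLS passthrough app responds, then polls the probe endpoint
+        /probe/ts/secure-app until it reports {"Total": 1, "Up": 1, "Unhealthy": 0}.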
+        """
+        session = create_sni_session()
+        req_url = (
+            f"https://{transport_server_tls_passthrough_setup.public_endpoint.public_ip}:"
+            f"{transport_server_tls_passthrough_setup.public_endpoint.port_ssl}"
+        )
+        wait_before_test()
+        resp = session.get(
+            req_url,
+            headers={"host": transport_server_tls_passthrough_setup.ts_host},
+            verify=False,
+        )
+        assert resp.status_code == 200
+        assert f"hello from pod {get_first_pod_name(kube_apis.v1, test_namespace)}" in resp.text
+
+        # Service Insight test
+        retry = 0
+        resp = mock.Mock()
+        resp.json.return_value = {}
+        resp.status_code = 502
+
+        service_insight_endpoint = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/ts/secure-app"
+        resp = requests.get(service_insight_endpoint)
+        assert resp.status_code == 200, f"Expected 200 code for /probe/ts/secure-app but got {resp.status_code}"
+
+        while (resp.json() != {"Total": 1, "Up": 1, "Unhealthy": 0}) and retry < 5:
+            resp = requests.get(service_insight_endpoint)
+            wait_before_test()
+            retry += 1
+        assert resp.json() == {"Total": 1, "Up": 1, "Unhealthy": 0}
+
+
+@pytest.fixture(scope="class")
+def https_secret_setup(request, kube_apis, test_namespace):
+    print("------------------------- Deploy Secret -----------------------------------")
+    secret_name = create_secret_from_yaml(kube_apis.v1, "nginx-ingress", f"{TEST_DATA}/service-insight/secret.yaml")
+
+    def fin():
+        delete_secret(kube_apis.v1, secret_name, "nginx-ingress")
+
+    request.addfinalizer(fin)
+
+
+@pytest.mark.ts
+@pytest.mark.skip_for_nginx_oss
+@pytest.mark.parametrize(
+    "crd_ingress_controller, transport_server_tls_passthrough_setup",
+    [
+        (
+            {
+                "type": "complete",
+                "extra_args": [
+                    "-enable-tls-passthrough=true",
+                    "-enable-service-insight",
+                    "-service-insight-tls-secret=nginx-ingress/test-secret",
+                ],
+            },
+            {"example": "transport-server-tls-passthrough"},
+        )
+    ],
+    indirect=True,
+)
+class TestTransportServerTSServiceInsightHTTPS:
+    def test_ts_service_insight_https(
+        self,
+        kube_apis,
+        https_secret_setup,
+        crd_ingress_controller,
+        transport_server_tls_passthrough_setup,
+        test_namespace,
+        ingress_controller_endpoint,
+    ):
+        """
+        Test Service Insight Endpoint with Transport Server on HTTPS port.
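+
+        The probe is served over TLS (enabled with -service-insight-tls-secret),
+        so the request is made with verify=False against the test certificate.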
+        """
+        retry = 0
+        resp = mock.Mock()
+        resp.json.return_value = {}
+        resp.status_code = 502
+
+        service_insight_endpoint = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.service_insight_port}/probe/ts/secure-app"
+        resp = requests.get(service_insight_endpoint, verify=False)
+        assert resp.status_code == 200, f"Expected 200 code for /probe/ts/secure-app but got {resp.status_code}"
+
+        while (resp.json() != {"Total": 1, "Up": 1, "Unhealthy": 0}) and retry < 5:
+            resp = requests.get(service_insight_endpoint, verify=False)
+            wait_before_test()
+            retry += 1
+        assert resp.json() == {"Total": 1, "Up": 1, "Unhealthy": 0}
diff --git a/tests/suite/test_virtual_server_service_insight.py b/tests/suite/test_virtual_server_service_insight.py
index bbda23c107..81cea90823 100644
--- a/tests/suite/test_virtual_server_service_insight.py
+++ b/tests/suite/test_virtual_server_service_insight.py
@@ -25,7 +25,7 @@
     ],
     indirect=True,
 )
-class TestHealthCheckVsHttp:
+class TestVirtualServerServiceInsightHTTP:
     def test_responses_svc_insight_http(
         self, request, kube_apis, crd_ingress_controller, virtual_server_setup, ingress_controller_endpoint
     ):
@@ -77,7 +77,7 @@ def fin():
     ],
     indirect=True,
 )
-class TestHealthCheckVsHttps:
+class TestVirtualServerServiceInsightHTTPS:
     def test_responses_svc_insight_https(
         self,
         request,