From 6661406b75c7de71971714151fd411e447c1469e Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Sat, 30 Nov 2019 12:33:34 +0900 Subject: [PATCH 01/13] Metrics provider for deployments and services behind Envoy Assumes `envoy:smi` as the mesh provider name as I've successfully tested the progressive delivery for Envoy + Crossover with it. This enhances Flagger to translate it to the metrics provider name of `envoy` for deployment targets, or `envoy:service` for service targets. The `envoy` metrics provider is equivalent to `appmesh`, as both relies on the same set of standard metrics exposed by Envoy itself. The `envoy:service` is almost the same as the `envoy` provider, but removing the condition on pod name, as we only need to filter on the backing service name = envoy_cluster_name. We don't consider other Envoy xDS implementations that uses anything that is different to original servicen ames as `envoy_cluster_name`, for now. Ref #385 --- cmd/flagger/main.go | 2 +- pkg/controller/controller_test.go | 2 +- pkg/controller/scheduler.go | 17 ++++++- pkg/metrics/envoy_service.go | 73 ++++++++++++++++++++++++++++++ pkg/metrics/envoy_service_test.go | 74 +++++++++++++++++++++++++++++++ pkg/metrics/factory.go | 14 +++--- 6 files changed, 170 insertions(+), 12 deletions(-) create mode 100644 pkg/metrics/envoy_service.go create mode 100644 pkg/metrics/envoy_service_test.go diff --git a/cmd/flagger/main.go b/cmd/flagger/main.go index 8548de560..7d9df9138 100644 --- a/cmd/flagger/main.go +++ b/cmd/flagger/main.go @@ -161,7 +161,7 @@ func main() { logger.Infof("Watching namespace %s", namespace) } - observerFactory, err := metrics.NewFactory(metricsServer, meshProvider, 5*time.Second) + observerFactory, err := metrics.NewFactory(metricsServer, 5*time.Second) if err != nil { logger.Fatalf("Error building prometheus client: %s", err.Error()) } diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 8554c4366..6945ae2ad 100644 --- 
a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -73,7 +73,7 @@ func SetupMocks(c *flaggerv1.Canary) Mocks { rf := router.NewFactory(nil, kubeClient, flaggerClient, "annotationsPrefix", logger, flaggerClient) // init observer - observerFactory, _ := metrics.NewFactory("fake", "istio", 5*time.Second) + observerFactory, _ := metrics.NewFactory("fake", 5*time.Second) // init canary factory configTracker := canary.ConfigTracker{ diff --git a/pkg/controller/scheduler.go b/pkg/controller/scheduler.go index 25cdabb6c..ec16b6023 100644 --- a/pkg/controller/scheduler.go +++ b/pkg/controller/scheduler.go @@ -13,6 +13,10 @@ import ( "github.com/weaveworks/flagger/pkg/router" ) +const ( + MetricsProviderServiceSuffix = ":service" +) + // scheduleCanaries synchronises the canary map with the jobs map, // for new canaries new jobs are created and started // for the removed canaries the jobs are stopped and deleted @@ -747,10 +751,19 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool { if r.Spec.Provider != "" { metricsProvider = r.Spec.Provider - // set the metrics server to Linkerd Prometheus when Linkerd is the default mesh provider + // set the metrics provider to Linkerd Prometheus when Linkerd is the default mesh provider if strings.Contains(c.meshProvider, "linkerd") { metricsProvider = "linkerd" } + + // set the metrics provider to Envoy Prometheus when Envoy is the default mesh provider + if strings.Contains(c.meshProvider, "envoy") { + metricsProvider = "envoy" + } + } + // set the metrics provider to query Prometheus for the canary Kubernetes service if the canary target is Service + if r.Spec.TargetRef.Kind == "Service" { + metricsProvider = metricsProvider + MetricsProviderServiceSuffix } // create observer based on the mesh provider @@ -761,7 +774,7 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool { if r.Spec.MetricsServer != "" { metricsServer = r.Spec.MetricsServer var err error - observerFactory, 
err = metrics.NewFactory(metricsServer, metricsProvider, 5*time.Second) + observerFactory, err = metrics.NewFactory(metricsServer, 5*time.Second) if err != nil { c.recordEventErrorf(r, "Error building Prometheus client for %s %v", r.Spec.MetricsServer, err) return false diff --git a/pkg/metrics/envoy_service.go b/pkg/metrics/envoy_service.go new file mode 100644 index 000000000..5e9d26c37 --- /dev/null +++ b/pkg/metrics/envoy_service.go @@ -0,0 +1,73 @@ +package metrics + +import ( + "time" +) + +var envoyServiceQueries = map[string]string{ + "request-success-rate": ` + sum( + rate( + envoy_cluster_upstream_rq{ + kubernetes_namespace="{{ .Namespace }}", + envoy_cluster_name="{{ .Name }}-canary", + envoy_response_code!~"5.*" + }[{{ .Interval }}] + ) + ) + / + sum( + rate( + envoy_cluster_upstream_rq{ + kubernetes_namespace="{{ .Namespace }}", + envoy_cluster_name="{{ .Name }}-canary" + }[{{ .Interval }}] + ) + ) + * 100`, + "request-duration": ` + histogram_quantile( + 0.99, + sum( + rate( + envoy_cluster_upstream_rq_time_bucket{ + kubernetes_namespace="{{ .Namespace }}", + envoy_cluster_name="{{ .Name }}-canary" + }[{{ .Interval }}] + ) + ) by (le) + )`, +} + +type EnvoyServiceObserver struct { + client *PrometheusClient +} + +func (ob *EnvoyServiceObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, envoyServiceQueries["request-success-rate"]) + if err != nil { + return 0, err + } + + value, err := ob.client.RunQuery(query) + if err != nil { + return 0, err + } + + return value, nil +} + +func (ob *EnvoyServiceObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, envoyServiceQueries["request-duration"]) + if err != nil { + return 0, err + } + + value, err := ob.client.RunQuery(query) + if err != nil { + return 0, err + } + + ms := 
time.Duration(int64(value)) * time.Millisecond + return ms, nil +} diff --git a/pkg/metrics/envoy_service_test.go b/pkg/metrics/envoy_service_test.go new file mode 100644 index 000000000..fca854bd0 --- /dev/null +++ b/pkg/metrics/envoy_service_test.go @@ -0,0 +1,74 @@ +package metrics + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func TestEnvoyServiceObserver_GetRequestSuccessRate(t *testing.T) { + expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name="podinfo-canary", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name="podinfo-canary" }[1m] ) ) * 100` + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + promql := r.URL.Query()["query"][0] + if promql != expected { + t.Errorf("\nGot %s \nWanted %s", promql, expected) + } + + json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}` + w.Write([]byte(json)) + })) + defer ts.Close() + + client, err := NewPrometheusClient(ts.URL, time.Second) + if err != nil { + t.Fatal(err) + } + + observer := &EnvoyServiceObserver{ + client: client, + } + + val, err := observer.GetRequestSuccessRate("podinfo", "default", "1m") + if err != nil { + t.Fatal(err.Error()) + } + + if val != 100 { + t.Errorf("Got %v wanted %v", val, 100) + } +} + +func TestEnvoyServiceObserver_GetRequestDuration(t *testing.T) { + expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", envoy_cluster_name="podinfo-canary" }[1m] ) ) by (le) )` + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + promql := r.URL.Query()["query"][0] + if promql != expected { + t.Errorf("\nGot %s \nWanted %s", promql, expected) + } + + json := 
`{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}` + w.Write([]byte(json)) + })) + defer ts.Close() + + client, err := NewPrometheusClient(ts.URL, time.Second) + if err != nil { + t.Fatal(err) + } + + observer := &EnvoyServiceObserver{ + client: client, + } + + val, err := observer.GetRequestDuration("podinfo", "default", "1m") + if err != nil { + t.Fatal(err.Error()) + } + + if val != 100*time.Millisecond { + t.Errorf("Got %v wanted %v", val, 100*time.Millisecond) + } +} diff --git a/pkg/metrics/factory.go b/pkg/metrics/factory.go index e2b69b8d8..ce6d85d07 100644 --- a/pkg/metrics/factory.go +++ b/pkg/metrics/factory.go @@ -6,19 +6,17 @@ import ( ) type Factory struct { - MeshProvider string - Client *PrometheusClient + Client *PrometheusClient } -func NewFactory(metricsServer string, meshProvider string, timeout time.Duration) (*Factory, error) { +func NewFactory(metricsServer string, timeout time.Duration) (*Factory, error) { client, err := NewPrometheusClient(metricsServer, timeout) if err != nil { return nil, err } return &Factory{ - MeshProvider: meshProvider, - Client: client, + Client: client, }, nil } @@ -32,7 +30,7 @@ func (factory Factory) Observer(provider string) Interface { return &HttpObserver{ client: factory.Client, } - case provider == "appmesh": + case provider == "appmesh", provider == "envoy": return &EnvoyObserver{ client: factory.Client, } @@ -44,8 +42,8 @@ func (factory Factory) Observer(provider string) Interface { return &GlooObserver{ client: factory.Client, } - case provider == "smi:linkerd": - return &LinkerdObserver{ + case provider == "appmesh:service", provider == "envoy:service": + return &EnvoyServiceObserver{ client: factory.Client, } case provider == "linkerd": From a828524957d7cb87d5b39858ece257611ea06688 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Sat, 30 Nov 2019 12:40:53 +0900 Subject: [PATCH 02/13] Add the guide for using Envoy and Crossover for Deployment targets Ref #385 
--- README.md | 1 + charts/grafana/dashboards/envoy.json | 1248 +++++++++++++++++ .../usage/envoy-progressive-delivery.md | 300 ++++ 3 files changed, 1549 insertions(+) create mode 100644 charts/grafana/dashboards/envoy.json create mode 100644 docs/gitbook/usage/envoy-progressive-delivery.md diff --git a/README.md b/README.md index 95b16c42f..0c7dde7ea 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,7 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap * [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery) * [Linkerd canary deployments](https://docs.flagger.app/usage/linkerd-progressive-delivery) * [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery) + * [Envoy canary deployments](https://docs.flagger.app/usage/envoy-progressive-delivery) * [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery) * [Gloo ingress controller canary deployments](https://docs.flagger.app/usage/gloo-progressive-delivery) * [Blue/Green deployments](https://docs.flagger.app/usage/blue-green) diff --git a/charts/grafana/dashboards/envoy.json b/charts/grafana/dashboards/envoy.json new file mode 100644 index 000000000..9a2cd5784 --- /dev/null +++ b/charts/grafana/dashboards/envoy.json @@ -0,0 +1,1248 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 2, + "iteration": 1553160305729, + "links": [], + "panels": [ + { + "content": "
\nRED: $canary.$namespace\n
", + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 89, + "links": [], + "mode": "html", + "title": "", + "transparent": true, + "type": "text" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 3 + }, + "id": 90, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "round(sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$primary\"}[30s])), 0.001)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Primary: Incoming Request Volume", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "decimals": null, + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 80, + "show": 
false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 3 + }, + "id": 98, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$primary\",envoy_response_code!~\"5.*\"}[30s])) / sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$primary\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": "95, 99, 99.5", + "title": "Incoming Success Rate", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 3 + }, + "id": 97, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, 
+ "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(40, 224, 65, 0.18)", + "full": true, + "lineColor": "#7eb26d", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "round(sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$canary\"}[30s])), 0.001)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Canary: Incoming Request Volume", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "decimals": null, + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 80, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 3 + }, + "id": 99, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(40, 224, 65, 0.18)", + "full": true, + "lineColor": "#7eb26d", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$canary\",envoy_response_code!~\"5.*\"}[30s])) / 
sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$canary\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": "95, 99, 99.5", + "title": "Incoming Success Rate", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 96, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(envoy_cluster_upstream_cx_rx_bytes_total{kubernetes_namespace=~\"$namespace\",app=~\"$primary\"}[30s])))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "traffic", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Primary: Incoming Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } 
+ }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 4, + "w": 12, + "x": 12, + "y": 7 + }, + "id": 91, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(envoy_cluster_upstream_cx_rx_bytes_total{kubernetes_namespace=~\"$namespace\",app=~\"$canary\"}[30s])))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "traffic", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Canary: Incoming Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "content": "
\nUSE: $canary.$namespace\n
", + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 101, + "links": [], + "mode": "html", + "title": "", + "transparent": true, + "type": "text" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 100, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Primary: CPU Usage by Pod", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": "CPU seconds / second", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 14 + }, + "id": 102, 
+ "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Canary: CPU Usage by Pod", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": "CPU seconds / second", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 103, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + 
"pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Primary: Memory Usage by Pod", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "bytes", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 104, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)", + "format": 
"time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Canary: Memory Usage by Pod", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "bytes", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 26 + }, + "id": 105, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "received", + "color": "#f9d9f9" + }, + { + "alias": "transmited", + "color": "#f29191" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m])) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "received", + "refId": "A" + }, + { + "expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": 
"transmited", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Primary: Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 106, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "received", + "color": "#f9d9f9" + }, + { + "alias": "transmited", + "color": "#f29191" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m])) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "received", + "refId": "A" + }, + { + "expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "transmited", + "refId": "B" + } + ], + "thresholds": [], + 
"timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Canary: Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": null, + "datasource": "prometheus", + "definition": "query_result(sum(envoy_cluster_upstream_rq) by (kubernetes_namespace))", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "query_result(sum(envoy_cluster_upstream_rq) by (kubernetes_namespace))", + "refresh": 1, + "regex": "/.*_namespace=\"([^\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": null, + "datasource": "prometheus", + "definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app=~\".*-primary\"}) by (app))", + "hide": 0, + "includeAll": false, + "label": "Primary", + "multi": false, + "name": "primary", + "options": [], + "query": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app=~\".*-primary\"}) by (app))", + "refresh": 1, + "regex": "/.*app=\"([^\"]*).*/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": null, + "datasource": "prometheus", + "definition": 
"query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app!~\".*-primary\"}) by (app))", + "hide": 0, + "includeAll": false, + "label": "Canary", + "multi": false, + "name": "canary", + "options": [], + "query": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app!~\".*-primary\"}) by (app))", + "refresh": 1, + "regex": "/.*app=\"([^\"]*).*/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Envoy Canary", + "uid": "flagger-envoy", + "version": 4 +} \ No newline at end of file diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md new file mode 100644 index 000000000..60a8ce67c --- /dev/null +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -0,0 +1,300 @@ +# Envoy Canary Deployments + +This guide shows you how to use Envoy and Flagger to automate canary deployments. + +### Prerequisites + +Flagger requires a Kubernetes cluster **v1.11** or newer and Envoy paired with [Crossover](https://github.com/mumoshu/crossover) sidecar. 
+ +Create a test namespace: + +```bash +kubectl create ns test +``` + +Install Envoy along with the sidecar with Helm: + +```bash +git clone https://github.com/mumoshu/crossover.git + +cd crossover + +helm upgrade --install envoy stable/envoy \ + --namespace test \ + -f example/values.yaml \ + -f example/values.services.yaml \ + --set services.podinfo.smi.enabled=true +``` + +Install Flagger and the Prometheus add-on in the same namespace as Envoy: + +```bash +helm repo add flagger https://flagger.app + +helm upgrade -i flagger flagger/flagger \ +--namespace test \ +--set prometheus.install=true \ +--set meshProvider=smi:envoy +``` + +Optionally you can enable Slack notifications: + +```bash +helm upgrade -i flagger flagger/flagger \ +--reuse-values \ +--namespace test \ +--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \ +--set slack.channel=general \ +--set slack.user=flagger +``` + +### Bootstrap + +Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA), +then creates a series of objects (Kubernetes deployments, ClusterIP services, SMI traffic splits). +These objects expose the application on the mesh and drive the canary analysis and promotion. +There's no SMI object you need to create by yourself. 
+ +Create a deployment and a horizontal pod autoscaler: + +```bash +kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo +``` + +Deploy the load testing service to generate traffic during the canary analysis: + +```bash +helm upgrade -i flagger-loadtester flagger/loadtester \ +--namespace=test \ +--set meshName=global \ +--set "backends[0]=podinfo.test" \ +--set "backends[1]=podinfo-canary.test" +``` + +Create a canary custom resource: + +```yaml +apiVersion: flagger.app/v1alpha3 +kind: Canary +metadata: + name: podinfo + namespace: test +spec: + # specify mesh provider if it isn't the default one + # provider: "envoy:smi" + # deployment reference + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: podinfo + # the maximum time in seconds for the canary deployment + # to make progress before it is rollback (default 600s) + progressDeadlineSeconds: 60 + # HPA reference (optional) + autoscalerRef: + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + name: podinfo + service: + # ClusterIP port number + port: 9898 + # container port number or name (optional) + targetPort: 9898 + # define the canary analysis timing and KPIs + canaryAnalysis: + # schedule interval (default 60s) + interval: 1m + # max number of failed metric checks before rollback + threshold: 5 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + # App Mesh Prometheus checks + metrics: + - name: request-success-rate + # minimum req success rate (non 5xx responses) + # percentage (0-100) + threshold: 99 + interval: 1m + - name: request-duration + # maximum req duration P99 + # milliseconds + threshold: 500 + interval: 30s + # testing (optional) + webhooks: + - name: acceptance-test + type: pre-rollout + url: http://flagger-loadtester.test/ + timeout: 30s + metadata: + type: bash + cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token" + - name: 
load-test + url: http://flagger-loadtester.test/ + timeout: 5s + metadata: + cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/" +``` + +Save the above resource as podinfo-canary.yaml and then apply it: + +```bash +kubectl apply -f ./podinfo-canary.yaml +``` + +After a couple of seconds Flagger will create the canary objects: + +```bash +# applied +deployment.apps/podinfo +horizontalpodautoscaler.autoscaling/podinfo +canary.flagger.app/podinfo + +# generated +deployment.apps/podinfo-primary +horizontalpodautoscaler.autoscaling/podinfo-primary +service/podinfo +service/podinfo-canary +service/podinfo-primary +trafficsplits.split.smi-spec.io/podinfo +``` + +After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed +to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods. + +### Automated canary promotion + +Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack. 
+ +![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png) + +A canary deployment is triggered by changes in any of the following objects: + +* Deployment PodSpec (container image, command, ports, env, resources, etc) +* ConfigMaps and Secrets mounted as volumes or mapped to environment variables + +Trigger a canary deployment by updating the container image: + +```bash +kubectl -n test set image deployment/podinfo \ +podinfod=stefanprodan/podinfo:3.1.5 +``` + +Flagger detects that the deployment revision changed and starts a new rollout: + +```text +kubectl -n test describe canary/podinfo + +Status: + Canary Weight: 0 + Failed Checks: 0 + Phase: Succeeded +Events: + New revision detected! Scaling up podinfo.test + Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available + Pre-rollout check acceptance-test passed + Advance podinfo.test canary weight 5 + Advance podinfo.test canary weight 10 + Advance podinfo.test canary weight 15 + Advance podinfo.test canary weight 20 + Advance podinfo.test canary weight 25 + Advance podinfo.test canary weight 30 + Advance podinfo.test canary weight 35 + Advance podinfo.test canary weight 40 + Advance podinfo.test canary weight 45 + Advance podinfo.test canary weight 50 + Copying podinfo.test template spec to podinfo-primary.test + Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available + Routing all traffic to primary + Promotion completed! Scaling down podinfo.test +``` + +When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. + +**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis. + +During the analysis the canary’s progress can be monitored with Grafana. 
+ +Run: + +```bash +kubectl port-forward svc/flagger-grafana 3000 +``` + +The Envoy dashboard URL is +http://localhost:3000/d/flagger-envoy/envoy-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo + +![Envoy Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-grafana-appmesh.png) + +You can monitor all canaries with: + +```bash +watch kubectl get canaries --all-namespaces + +NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME +test podinfo Progressing 15 2019-10-02T14:05:07Z +prod frontend Succeeded 0 2019-10-02T16:15:07Z +prod backend Failed 0 2019-10-02T17:05:07Z +``` + +If you’ve enabled the Slack notifications, you should receive the following messages: + +![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png) + +### Automated rollback + +During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout. + +Trigger a canary deployment: + +```bash +kubectl -n test set image deployment/podinfo \ +podinfod=stefanprodan/podinfo:3.1.2 +``` + +Exec into the load tester pod with: + +```bash +kubectl -n test exec -it deploy/flagger-loadtester bash +``` + +Generate HTTP 500 errors: + +```bash +hey -z 1m -c 5 -q 5 http://podinfo-canary.test:9898/status/500 +``` + +Generate latency: + +```bash +watch -n 1 curl http://podinfo-canary.test:9898/delay/1 +``` + +When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, +the canary is scaled to zero and the rollout is marked as failed. + +```text +kubectl -n test logs deploy/flagger -f | jq .msg + +New revision detected! 
Starting canary analysis for podinfo.test +Pre-rollout check acceptance-test passed +Advance podinfo.test canary weight 5 +Advance podinfo.test canary weight 10 +Advance podinfo.test canary weight 15 +Halt podinfo.test advancement success rate 69.17% < 99% +Halt podinfo.test advancement success rate 61.39% < 99% +Halt podinfo.test advancement success rate 55.06% < 99% +Halt podinfo.test advancement request duration 1.20s > 0.5s +Halt podinfo.test advancement request duration 1.45s > 0.5s +Rolling back podinfo.test failed checks threshold reached 5 +Canary failed! Scaling down podinfo.test +``` + +If you’ve enabled the Slack notifications, you’ll receive a message if the progress deadline is exceeded, +or if the analysis reached the maximum number of failed checks: + +![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png) From 08851f83c7d16af0a18064ae929fcc58c0b1c3f1 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Sun, 1 Dec 2019 23:25:29 +0900 Subject: [PATCH 03/13] Make envoy + crossover installation a bit more understandable --- docs/gitbook/usage/envoy-progressive-delivery.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md index 60a8ce67c..39beec704 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -22,8 +22,20 @@ cd crossover helm upgrade --install envoy stable/envoy \ --namespace test \ -f example/values.yaml \ - -f example/values.services.yaml \ - --set services.podinfo.smi.enabled=true + -f <(cat < Date: Sun, 1 Dec 2019 23:29:21 +0900 Subject: [PATCH 04/13] Fix loadtester config in the envoy doc --- docs/gitbook/usage/envoy-progressive-delivery.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md 
b/docs/gitbook/usage/envoy-progressive-delivery.md index 39beec704..0a9292b73 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -77,10 +77,7 @@ Deploy the load testing service to generate traffic during the canary analysis: ```bash helm upgrade -i flagger-loadtester flagger/loadtester \ ---namespace=test \ ---set meshName=global \ ---set "backends[0]=podinfo.test" \ ---set "backends[1]=podinfo-canary.test" +--namespace=test ``` Create a canary custom resource: From 020129bf5c13df616541a4eed091de9c340b087b Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Tue, 17 Dec 2019 15:45:16 +0900 Subject: [PATCH 05/13] Fix misconfiguration --- docs/gitbook/usage/envoy-progressive-delivery.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md index 0a9292b73..e3306b5ee 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -30,10 +30,10 @@ services: backends: podinfo-primary: port: 9898 - weight: 50 + weight: 100 podinfo-canary: port: 9898 - weight: 50 + weight: 0 EOF ) ``` @@ -90,7 +90,7 @@ metadata: namespace: test spec: # specify mesh provider if it isn't the default one - # provider: "envoy:smi" + # provider: "smi:envoy" # deployment reference targetRef: apiVersion: apps/v1 From 1e5d05c3fc9ca028ecbdc82d200332567a10ac3e Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Tue, 17 Dec 2019 17:02:50 +0900 Subject: [PATCH 06/13] Improve Envoy/Crossover installation experience with the chart registry --- docs/gitbook/usage/envoy-progressive-delivery.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md index e3306b5ee..f91d78863 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ 
b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -15,13 +15,10 @@ kubectl create ns test Install Envoy along with the sidecar with Helm: ```bash -git clone https://github.com/mumoshu/crossover.git +helm repo add crossover https://mumoshu.github.io/crossover -cd crossover - -helm upgrade --install envoy stable/envoy \ +helm upgrade --install envoy crossover/envoy \ --namespace test \ - -f example/values.yaml \ -f <(cat < Date: Tue, 17 Dec 2019 20:18:33 +0900 Subject: [PATCH 07/13] Fix the dashboard and the steps to browse it --- charts/grafana/dashboards/envoy.json | 2 +- docs/gitbook/usage/envoy-progressive-delivery.md | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/charts/grafana/dashboards/envoy.json b/charts/grafana/dashboards/envoy.json index 9a2cd5784..de502323d 100644 --- a/charts/grafana/dashboards/envoy.json +++ b/charts/grafana/dashboards/envoy.json @@ -15,7 +15,7 @@ "editable": true, "gnetId": null, "graphTooltip": 0, - "id": 2, + "id": 3, "iteration": 1553160305729, "links": [], "panels": [ diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md index f91d78863..ee0a804e3 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -225,10 +225,18 @@ When the canary analysis starts, Flagger will call the pre-rollout webhooks befo During the analysis the canary’s progress can be monitored with Grafana. +Flagger comes with a Grafana dashboard made for canary analysis. 
Install Grafana with Helm: + +```bash +helm upgrade -i flagger-grafana flagger/grafana \ +--namespace=test \ +--set url=http://flagger-prometheus:9090 +``` + Run: ```bash -kubectl port-forward svc/flagger-grafana 3000 +kubectl port-forward --namespace test svc/flagger-grafana 3000:80 ``` The Envoy dashboard URL is From 52856177e3847efcbe97f602a2bbc84e6196f45f Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Wed, 18 Dec 2019 09:03:41 +0900 Subject: [PATCH 08/13] Fix trafficsplits api version for envoy+crossover --- docs/gitbook/usage/envoy-progressive-delivery.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md index ee0a804e3..670d1b891 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -20,6 +20,9 @@ helm repo add crossover https://mumoshu.github.io/crossover helm upgrade --install envoy crossover/envoy \ --namespace test \ -f <(cat < Date: Wed, 18 Dec 2019 09:05:37 +0900 Subject: [PATCH 09/13] Do use correct envoy metrics for canary analysis --- pkg/metrics/envoy.go | 6 +++--- pkg/metrics/envoy_test.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/metrics/envoy.go b/pkg/metrics/envoy.go index ce88b5407..ce1870ce9 100644 --- a/pkg/metrics/envoy.go +++ b/pkg/metrics/envoy.go @@ -10,7 +10,7 @@ var envoyQueries = map[string]string{ rate( envoy_cluster_upstream_rq{ kubernetes_namespace="{{ .Namespace }}", - kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", + envoy_cluster_name=~"{{ .Name }}-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", envoy_response_code!~"5.*" }[{{ .Interval }}] ) @@ -20,7 +20,7 @@ var envoyQueries = map[string]string{ rate( envoy_cluster_upstream_rq{ kubernetes_namespace="{{ .Namespace }}", - kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" + envoy_cluster_name=~"{{ .Name }}-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[{{ .Interval }}] ) 
) @@ -32,7 +32,7 @@ var envoyQueries = map[string]string{ rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="{{ .Namespace }}", - kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" + envoy_cluster_name=~"{{ .Name }}-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[{{ .Interval }}] ) ) by (le) diff --git a/pkg/metrics/envoy_test.go b/pkg/metrics/envoy_test.go index 442b59e7e..8b013e05f 100644 --- a/pkg/metrics/envoy_test.go +++ b/pkg/metrics/envoy_test.go @@ -8,7 +8,7 @@ import ( ) func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) { - expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) * 100` + expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) * 100` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { promql := r.URL.Query()["query"][0] @@ -41,7 +41,7 @@ func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) { } func TestEnvoyObserver_GetRequestDuration(t *testing.T) { - expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) by (le) )` + expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) by (le) )` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { promql := r.URL.Query()["query"][0] From 806b95c8ceea5e43218315397a1e5c530f548b63 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Wed, 18 Dec 2019 09:06:22 +0900 Subject: [PATCH 10/13] Do send http requests only to canary for canary analysis --- docs/gitbook/usage/envoy-progressive-delivery.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md index 670d1b891..2074a7d31 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -146,7 +146,7 @@ spec: url: http://flagger-loadtester.test/ timeout: 5s metadata: - cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/" + cmd: "hey -z 1m -q 10 -c 2 http://envoy.test:10000/" ``` Save the above resource as podinfo-canary.yaml and then apply it: @@ -282,13 +282,13 @@ kubectl -n test exec -it deploy/flagger-loadtester bash Generate HTTP 500 errors: ```bash -hey -z 1m -c 5 -q 5 http://podinfo-canary.test:9898/status/500 +hey -z 1m -c 5 -q 5 http://envoy.test:10000/status/500 ``` Generate latency: ```bash -watch -n 1 curl http://podinfo-canary.test:9898/delay/1 +watch -n 1 curl http://envoy.test:10000/delay/1 ``` When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, From d75ade5e8ce071321f6d9a57fb79beb35d6d2b7b Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Wed, 18 Dec 2019 10:55:49 +0900 Subject: [PATCH 11/13] Fix envoy dashboard, scheduler, and envoy metrics provider to correctly pass canary analysis and show graphs --- charts/grafana/dashboards/envoy.json | 66 +++++++------------ .../usage/envoy-progressive-delivery.md | 2 +- pkg/controller/scheduler.go | 23 ++++--- pkg/metrics/envoy.go | 6 +- pkg/metrics/envoy_test.go | 4 +- 5 files changed, 41 insertions(+), 60 deletions(-) diff --git a/charts/grafana/dashboards/envoy.json 
b/charts/grafana/dashboards/envoy.json index de502323d..46b27a66b 100644 --- a/charts/grafana/dashboards/envoy.json +++ b/charts/grafana/dashboards/envoy.json @@ -20,7 +20,7 @@ "links": [], "panels": [ { - "content": "
\nRED: $canary.$namespace\n
", + "content": "
\nRED: $target.$namespace\n
", "gridPos": { "h": 3, "w": 24, @@ -95,7 +95,7 @@ "tableColumn": "", "targets": [ { - "expr": "round(sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$primary\"}[30s])), 0.001)", + "expr": "round(sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-primary\"}[30s])), 0.001)", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -179,7 +179,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$primary\",envoy_response_code!~\"5.*\"}[30s])) / sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$primary\"}[30s]))", + "expr": "sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-primary\",envoy_response_code!~\"5.*\"}[30s])) / sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-primary\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -261,7 +261,7 @@ "tableColumn": "", "targets": [ { - "expr": "round(sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$canary\"}[30s])), 0.001)", + "expr": "round(sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-canary\"}[30s])), 0.001)", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -345,7 +345,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$canary\",envoy_response_code!~\"5.*\"}[30s])) / sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",app=~\"$canary\"}[30s]))", + "expr": "sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-canary\",envoy_response_code!~\"5.*\"}[30s])) / 
sum(irate(envoy_cluster_upstream_rq{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-canary\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -407,7 +407,7 @@ "steppedLine": false, "targets": [ { - "expr": "(sum(rate(envoy_cluster_upstream_cx_rx_bytes_total{kubernetes_namespace=~\"$namespace\",app=~\"$primary\"}[30s])))", + "expr": "(sum(rate(envoy_cluster_upstream_cx_rx_bytes_total{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-primary\"}[30s])))", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -497,7 +497,7 @@ "steppedLine": false, "targets": [ { - "expr": "(sum(rate(envoy_cluster_upstream_cx_rx_bytes_total{kubernetes_namespace=~\"$namespace\",app=~\"$canary\"}[30s])))", + "expr": "(sum(rate(envoy_cluster_upstream_cx_rx_bytes_total{kubernetes_namespace=~\"$namespace\",envoy_cluster_name=~\"$target-canary\"}[30s])))", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -547,7 +547,7 @@ } }, { - "content": "
\nUSE: $canary.$namespace\n
", + "content": "
\nUSE: $target.$namespace\n
", "gridPos": { "h": 3, "w": 24, @@ -602,7 +602,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)", + "expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -692,7 +692,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)", + "expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$target.*\", pod_name!~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -782,7 +782,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)", + "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)", "format": "time_series", "hide": false, "interval": "", @@ -874,7 +874,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)", + "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$target.*\", pod_name!~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)", "format": "time_series", "hide": false, "interval": "", @@ -975,14 +975,14 @@ "steppedLine": false, 
"targets": [ { - "expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m])) ", + "expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$target-primary.*\"}[1m])) ", "format": "time_series", "intervalFactor": 1, "legendFormat": "received", "refId": "A" }, { - "expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m]))", + "expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$target-primary.*\"}[1m]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "transmited", @@ -1081,14 +1081,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m])) ", + "expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$target.*\",pod_name!~\"$target-primary.*\"}[1m])) ", "format": "time_series", "intervalFactor": 1, "legendFormat": "received", "refId": "A" }, { - "expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m]))", + "expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$target.*\",pod_name!~\"$target-primary.*\"}[1m]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "transmited", @@ -1170,38 +1170,16 @@ "allValue": null, "current": null, "datasource": "prometheus", - "definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app=~\".*-primary\"}) by (app))", + "definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",envoy_cluster_name=~\".*-primary\"}) by (envoy_cluster_name))", "hide": 0, "includeAll": false, - "label": "Primary", + "label": "Target", "multi": false, - "name": "primary", + "name": 
"target", "options": [], - "query": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app=~\".*-primary\"}) by (app))", + "query": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",envoy_cluster_name=~\".*-primary\"}) by (envoy_cluster_name))", "refresh": 1, - "regex": "/.*app=\"([^\"]*).*/", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": null, - "datasource": "prometheus", - "definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app!~\".*-primary\"}) by (app))", - "hide": 0, - "includeAll": false, - "label": "Canary", - "multi": false, - "name": "canary", - "options": [], - "query": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app!~\".*-primary\"}) by (app))", - "refresh": 1, - "regex": "/.*app=\"([^\"]*).*/", + "regex": "/.*envoy_cluster_name=\"(.*)-primary\"/", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", @@ -1244,5 +1222,5 @@ "timezone": "", "title": "Envoy Canary", "uid": "flagger-envoy", - "version": 4 + "version": 6 } \ No newline at end of file diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md b/docs/gitbook/usage/envoy-progressive-delivery.md index 2074a7d31..ee7a8f6f9 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/envoy-progressive-delivery.md @@ -243,7 +243,7 @@ kubectl port-forward --namespace test svc/flagger-grafana 3000:80 ``` The Envoy dashboard URL is -http://localhost:3000/d/flagger-envoy/envoy-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo +http://localhost:3000/d/flagger-envoy/envoy-canary?refresh=10s&orgId=1&var-namespace=test&var-target=podinfo ![Envoy Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-grafana-appmesh.png) diff --git 
a/pkg/controller/scheduler.go b/pkg/controller/scheduler.go index ec16b6023..7768daac4 100644 --- a/pkg/controller/scheduler.go +++ b/pkg/controller/scheduler.go @@ -747,7 +747,15 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool { } // override the global provider if one is specified in the canary spec - metricsProvider := c.meshProvider + var metricsProvider string + // set the metrics provider to Envoy Prometheus when Envoy is the mesh provider + // For example, `envoy` metrics provider should be used for `smi:envoy` mesh provider + if strings.Contains(c.meshProvider, "envoy") { + metricsProvider = "envoy" + } else { + metricsProvider = c.meshProvider + } + if r.Spec.Provider != "" { metricsProvider = r.Spec.Provider @@ -755,11 +763,6 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool { if strings.Contains(c.meshProvider, "linkerd") { metricsProvider = "linkerd" } - - // set the metrics provider to Envoy Prometheus when Envoy is the default mesh provider - if strings.Contains(c.meshProvider, "envoy") { - metricsProvider = "envoy" - } } // set the metrics provider to query Prometheus for the canary Kubernetes service if the canary target is Service if r.Spec.TargetRef.Kind == "Service" { @@ -792,8 +795,8 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool { val, err := observer.GetRequestSuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Interval) if err != nil { if strings.Contains(err.Error(), "no values found") { - c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic", - metric.Name, r.Spec.TargetRef.Name, r.Namespace) + c.recordEventWarningf(r, "Halt advancement no values found for %s metric %s probably %s.%s is not receiving traffic", + metricsProvider, metric.Name, r.Spec.TargetRef.Name, r.Namespace) } else { c.recordEventErrorf(r, "Metrics server %s query failed: %v", metricsServer, err) } @@ -812,8 +815,8 @@ func (c *Controller) analyseCanary(r 
*flaggerv1.Canary) bool { val, err := observer.GetRequestDuration(r.Spec.TargetRef.Name, r.Namespace, metric.Interval) if err != nil { if strings.Contains(err.Error(), "no values found") { - c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic", - metric.Name, r.Spec.TargetRef.Name, r.Namespace) + c.recordEventWarningf(r, "Halt advancement no values found for %s metric %s probably %s.%s is not receiving traffic", + metricsProvider, metric.Name, r.Spec.TargetRef.Name, r.Namespace) } else { c.recordEventErrorf(r, "Metrics server %s query failed: %v", metricsServer, err) } diff --git a/pkg/metrics/envoy.go b/pkg/metrics/envoy.go index ce1870ce9..bf568b7a2 100644 --- a/pkg/metrics/envoy.go +++ b/pkg/metrics/envoy.go @@ -10,7 +10,7 @@ var envoyQueries = map[string]string{ rate( envoy_cluster_upstream_rq{ kubernetes_namespace="{{ .Namespace }}", - envoy_cluster_name=~"{{ .Name }}-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", + envoy_cluster_name=~"{{ .Name }}-canary", envoy_response_code!~"5.*" }[{{ .Interval }}] ) @@ -20,7 +20,7 @@ var envoyQueries = map[string]string{ rate( envoy_cluster_upstream_rq{ kubernetes_namespace="{{ .Namespace }}", - envoy_cluster_name=~"{{ .Name }}-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" + envoy_cluster_name=~"{{ .Name }}-canary" }[{{ .Interval }}] ) ) @@ -32,7 +32,7 @@ var envoyQueries = map[string]string{ rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="{{ .Namespace }}", - envoy_cluster_name=~"{{ .Name }}-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" + envoy_cluster_name=~"{{ .Name }}-canary" }[{{ .Interval }}] ) ) by (le) diff --git a/pkg/metrics/envoy_test.go b/pkg/metrics/envoy_test.go index 8b013e05f..3852fd350 100644 --- a/pkg/metrics/envoy_test.go +++ b/pkg/metrics/envoy_test.go @@ -8,7 +8,7 @@ import ( ) func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) { - expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", 
envoy_cluster_name=~"podinfo-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) * 100` + expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary" }[1m] ) ) * 100` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { promql := r.URL.Query()["query"][0] @@ -41,7 +41,7 @@ func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) { } func TestEnvoyObserver_GetRequestDuration(t *testing.T) { - expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) by (le) )` + expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary" }[1m] ) ) by (le) )` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { promql := r.URL.Query()["query"][0] From 357ef86c8b5ee43bd044aa801f8ef061b1ce9a4f Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Wed, 18 Dec 2019 22:03:30 +0900 Subject: [PATCH 12/13] Differentiate AppMesh observer vs Crossover observer To not break AppMesh integration. 
--- README.md | 2 +- ...y.md => crossover-progressive-delivery.md} | 20 ++--- pkg/controller/scheduler.go | 8 +- pkg/metrics/appmesh.go | 73 ++++++++++++++++++ pkg/metrics/appmesh_test.go | 74 +++++++++++++++++++ pkg/metrics/{envoy.go => crossover.go} | 12 +-- ...{envoy_service.go => crossover_service.go} | 12 +-- ...vice_test.go => crossover_service_test.go} | 8 +- .../{envoy_test.go => crossover_test.go} | 8 +- pkg/metrics/factory.go | 10 ++- 10 files changed, 190 insertions(+), 37 deletions(-) rename docs/gitbook/usage/{envoy-progressive-delivery.md => crossover-progressive-delivery.md} (93%) create mode 100644 pkg/metrics/appmesh.go create mode 100644 pkg/metrics/appmesh_test.go rename pkg/metrics/{envoy.go => crossover.go} (74%) rename pkg/metrics/{envoy_service.go => crossover_service.go} (72%) rename pkg/metrics/{envoy_service_test.go => crossover_service_test.go} (89%) rename pkg/metrics/{envoy_test.go => crossover_test.go} (90%) diff --git a/README.md b/README.md index 0c7dde7ea..7f6be2a4d 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap * [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery) * [Linkerd canary deployments](https://docs.flagger.app/usage/linkerd-progressive-delivery) * [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery) - * [Envoy canary deployments](https://docs.flagger.app/usage/envoy-progressive-delivery) + * [Crossover canary deployments](https://docs.flagger.app/usage/crossover-progressive-delivery) * [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery) * [Gloo ingress controller canary deployments](https://docs.flagger.app/usage/gloo-progressive-delivery) * [Blue/Green deployments](https://docs.flagger.app/usage/blue-green) diff --git a/docs/gitbook/usage/envoy-progressive-delivery.md 
b/docs/gitbook/usage/crossover-progressive-delivery.md similarity index 93% rename from docs/gitbook/usage/envoy-progressive-delivery.md rename to docs/gitbook/usage/crossover-progressive-delivery.md index ee7a8f6f9..1559f0fc9 100644 --- a/docs/gitbook/usage/envoy-progressive-delivery.md +++ b/docs/gitbook/usage/crossover-progressive-delivery.md @@ -1,6 +1,8 @@ -# Envoy Canary Deployments +# Envoy/Crossover Canary Deployments -This guide shows you how to use Envoy and Flagger to automate canary deployments. +This guide shows you how to use Envoy, [Crossover](https://github.com/mumoshu/crossover) and Flagger to automate canary deployments. + +Crossover is a minimal Envoy xDS implementation that supports [Service Mesh Interface](https://smi-spec.io/). ### Prerequisites @@ -12,7 +14,7 @@ Create a test namespace: kubectl create ns test ``` -Install Envoy along with the sidecar with Helm: +Install Envoy along with the Crossover sidecar with Helm: ```bash helm repo add crossover https://mumoshu.github.io/crossover @@ -23,7 +25,7 @@ helm upgrade --install envoy crossover/envoy \ smi: apiVersions: trafficSplits: v1alpha1 -services: +upstreams: podinfo: smi: enabled: true @@ -46,7 +48,7 @@ helm repo add flagger https://flagger.app helm upgrade -i flagger flagger/flagger \ --namespace test \ --set prometheus.install=true \ ---set meshProvider=smi:envoy +--set meshProvider=smi:crossover ``` Optionally you can enable Slack notifications: @@ -90,7 +92,7 @@ metadata: namespace: test spec: # specify mesh provider if it isn't the default one - # provider: "smi:envoy" + # provider: "smi:crossover" # deployment reference targetRef: apiVersion: apps/v1 @@ -146,7 +148,7 @@ spec: url: http://flagger-loadtester.test/ timeout: 5s metadata: - cmd: "hey -z 1m -q 10 -c 2 http://envoy.test:10000/" + cmd: "hey -z 1m -q 10 -c 2 -H 'Host: podinfo.test' http://envoy.test:10000/" ``` Save the above resource as podinfo-canary.yaml and then apply it: @@ -282,13 +284,13 @@ kubectl -n test exec
-it deploy/flagger-loadtester bash Generate HTTP 500 errors: ```bash -hey -z 1m -c 5 -q 5 http://envoy.test:10000/status/500 +hey -z 1m -c 5 -q 5 -H 'Host: podinfo.test' http://envoy.test:10000/status/500 ``` Generate latency: ```bash -watch -n 1 curl http://envoy.test:10000/delay/1 +watch -n 1 curl -H 'Host: podinfo.test' http://envoy.test:10000/delay/1 ``` When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, diff --git a/pkg/controller/scheduler.go b/pkg/controller/scheduler.go index 7768daac4..307832654 100644 --- a/pkg/controller/scheduler.go +++ b/pkg/controller/scheduler.go @@ -748,10 +748,10 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool { // override the global provider if one is specified in the canary spec var metricsProvider string - // set the metrics provider to Envoy Prometheus when Envoy is the mesh provider - // For example, `envoy` metrics provider should be used for `smi:envoy` mesh provider - if strings.Contains(c.meshProvider, "envoy") { - metricsProvider = "envoy" + // set the metrics provider to Crossover Prometheus when Crossover is the mesh provider + // For example, `crossover` metrics provider should be used for `smi:crossover` mesh provider + if strings.Contains(c.meshProvider, "crossover") { + metricsProvider = "crossover" } else { metricsProvider = c.meshProvider } diff --git a/pkg/metrics/appmesh.go b/pkg/metrics/appmesh.go new file mode 100644 index 000000000..f922c55a7 --- /dev/null +++ b/pkg/metrics/appmesh.go @@ -0,0 +1,73 @@ +package metrics + +import ( + "time" +) + +var appMeshQueries = map[string]string{ + "request-success-rate": ` + sum( + rate( + envoy_cluster_upstream_rq{ + kubernetes_namespace="{{ .Namespace }}", + kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", + envoy_response_code!~"5.*" + }[{{ .Interval }}] + ) + ) + / + sum( + rate( + envoy_cluster_upstream_rq{ + kubernetes_namespace="{{ .Namespace }}", + 
kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" + }[{{ .Interval }}] + ) + ) + * 100`, + "request-duration": ` + histogram_quantile( + 0.99, + sum( + rate( + envoy_cluster_upstream_rq_time_bucket{ + kubernetes_namespace="{{ .Namespace }}", + kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" + }[{{ .Interval }}] + ) + ) by (le) + )`, +} + +type AppMeshObserver struct { + client *PrometheusClient +} + +func (ob *AppMeshObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, appMeshQueries["request-success-rate"]) + if err != nil { + return 0, err + } + + value, err := ob.client.RunQuery(query) + if err != nil { + return 0, err + } + + return value, nil +} + +func (ob *AppMeshObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, appMeshQueries["request-duration"]) + if err != nil { + return 0, err + } + + value, err := ob.client.RunQuery(query) + if err != nil { + return 0, err + } + + ms := time.Duration(int64(value)) * time.Millisecond + return ms, nil +} diff --git a/pkg/metrics/appmesh_test.go b/pkg/metrics/appmesh_test.go new file mode 100644 index 000000000..471be5c4f --- /dev/null +++ b/pkg/metrics/appmesh_test.go @@ -0,0 +1,74 @@ +package metrics + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func TestAppMeshObserver_GetRequestSuccessRate(t *testing.T) { + expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) * 100` + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + promql := 
r.URL.Query()["query"][0] + if promql != expected { + t.Errorf("\nGot %s \nWanted %s", promql, expected) + } + + json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}` + w.Write([]byte(json)) + })) + defer ts.Close() + + client, err := NewPrometheusClient(ts.URL, time.Second) + if err != nil { + t.Fatal(err) + } + + observer := &AppMeshObserver{ + client: client, + } + + val, err := observer.GetRequestSuccessRate("podinfo", "default", "1m") + if err != nil { + t.Fatal(err.Error()) + } + + if val != 100 { + t.Errorf("Got %v wanted %v", val, 100) + } +} + +func TestAppMeshObserver_GetRequestDuration(t *testing.T) { + expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) by (le) )` + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + promql := r.URL.Query()["query"][0] + if promql != expected { + t.Errorf("\nGot %s \nWanted %s", promql, expected) + } + + json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}` + w.Write([]byte(json)) + })) + defer ts.Close() + + client, err := NewPrometheusClient(ts.URL, time.Second) + if err != nil { + t.Fatal(err) + } + + observer := &AppMeshObserver{ + client: client, + } + + val, err := observer.GetRequestDuration("podinfo", "default", "1m") + if err != nil { + t.Fatal(err.Error()) + } + + if val != 100*time.Millisecond { + t.Errorf("Got %v wanted %v", val, 100*time.Millisecond) + } +} diff --git a/pkg/metrics/envoy.go b/pkg/metrics/crossover.go similarity index 74% rename from pkg/metrics/envoy.go rename to pkg/metrics/crossover.go index bf568b7a2..54a5e2908 100644 --- a/pkg/metrics/envoy.go +++ b/pkg/metrics/crossover.go @@ -4,7 +4,7 @@ import ( "time" ) -var envoyQueries = map[string]string{ +var crossoverQueries = map[string]string{ 
"request-success-rate": ` sum( rate( @@ -39,12 +39,12 @@ var envoyQueries = map[string]string{ )`, } -type EnvoyObserver struct { +type CrossoverObserver struct { client *PrometheusClient } -func (ob *EnvoyObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) { - query, err := ob.client.RenderQuery(name, namespace, interval, envoyQueries["request-success-rate"]) +func (ob *CrossoverObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, crossoverQueries["request-success-rate"]) if err != nil { return 0, err } @@ -57,8 +57,8 @@ func (ob *EnvoyObserver) GetRequestSuccessRate(name string, namespace string, in return value, nil } -func (ob *EnvoyObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) { - query, err := ob.client.RenderQuery(name, namespace, interval, envoyQueries["request-duration"]) +func (ob *CrossoverObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, crossoverQueries["request-duration"]) if err != nil { return 0, err } diff --git a/pkg/metrics/envoy_service.go b/pkg/metrics/crossover_service.go similarity index 72% rename from pkg/metrics/envoy_service.go rename to pkg/metrics/crossover_service.go index 5e9d26c37..bde9f0a18 100644 --- a/pkg/metrics/envoy_service.go +++ b/pkg/metrics/crossover_service.go @@ -4,7 +4,7 @@ import ( "time" ) -var envoyServiceQueries = map[string]string{ +var crossoverServiceQueries = map[string]string{ "request-success-rate": ` sum( rate( @@ -39,12 +39,12 @@ var envoyServiceQueries = map[string]string{ )`, } -type EnvoyServiceObserver struct { +type CrossoverServiceObserver struct { client *PrometheusClient } -func (ob *EnvoyServiceObserver) GetRequestSuccessRate(name string, namespace string, interval string) 
(float64, error) { - query, err := ob.client.RenderQuery(name, namespace, interval, envoyServiceQueries["request-success-rate"]) +func (ob *CrossoverServiceObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, crossoverServiceQueries["request-success-rate"]) if err != nil { return 0, err } @@ -57,8 +57,8 @@ func (ob *EnvoyServiceObserver) GetRequestSuccessRate(name string, namespace str return value, nil } -func (ob *EnvoyServiceObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) { - query, err := ob.client.RenderQuery(name, namespace, interval, envoyServiceQueries["request-duration"]) +func (ob *CrossoverServiceObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) { + query, err := ob.client.RenderQuery(name, namespace, interval, crossoverServiceQueries["request-duration"]) if err != nil { return 0, err } diff --git a/pkg/metrics/envoy_service_test.go b/pkg/metrics/crossover_service_test.go similarity index 89% rename from pkg/metrics/envoy_service_test.go rename to pkg/metrics/crossover_service_test.go index fca854bd0..8d65bbab0 100644 --- a/pkg/metrics/envoy_service_test.go +++ b/pkg/metrics/crossover_service_test.go @@ -7,7 +7,7 @@ import ( "time" ) -func TestEnvoyServiceObserver_GetRequestSuccessRate(t *testing.T) { +func TestCrossoverServiceObserver_GetRequestSuccessRate(t *testing.T) { expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name="podinfo-canary", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name="podinfo-canary" }[1m] ) ) * 100` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -26,7 +26,7 @@ func TestEnvoyServiceObserver_GetRequestSuccessRate(t *testing.T) { t.Fatal(err) } - 
observer := &EnvoyServiceObserver{ + observer := &CrossoverServiceObserver{ client: client, } @@ -40,7 +40,7 @@ func TestEnvoyServiceObserver_GetRequestSuccessRate(t *testing.T) { } } -func TestEnvoyServiceObserver_GetRequestDuration(t *testing.T) { +func TestCrossoverServiceObserver_GetRequestDuration(t *testing.T) { expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", envoy_cluster_name="podinfo-canary" }[1m] ) ) by (le) )` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -59,7 +59,7 @@ func TestEnvoyServiceObserver_GetRequestDuration(t *testing.T) { t.Fatal(err) } - observer := &EnvoyServiceObserver{ + observer := &CrossoverServiceObserver{ client: client, } diff --git a/pkg/metrics/envoy_test.go b/pkg/metrics/crossover_test.go similarity index 90% rename from pkg/metrics/envoy_test.go rename to pkg/metrics/crossover_test.go index 3852fd350..dd788a6f5 100644 --- a/pkg/metrics/envoy_test.go +++ b/pkg/metrics/crossover_test.go @@ -7,7 +7,7 @@ import ( "time" ) -func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) { +func TestCrossoverObserver_GetRequestSuccessRate(t *testing.T) { expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary" }[1m] ) ) * 100` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -26,7 +26,7 @@ func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) { t.Fatal(err) } - observer := &EnvoyObserver{ + observer := &CrossoverObserver{ client: client, } @@ -40,7 +40,7 @@ func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) { } } -func TestEnvoyObserver_GetRequestDuration(t *testing.T) { +func TestCrossoverObserver_GetRequestDuration(t *testing.T) { expected := ` 
histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", envoy_cluster_name=~"podinfo-canary" }[1m] ) ) by (le) )` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -59,7 +59,7 @@ func TestEnvoyObserver_GetRequestDuration(t *testing.T) { t.Fatal(err) } - observer := &EnvoyObserver{ + observer := &CrossoverObserver{ client: client, } diff --git a/pkg/metrics/factory.go b/pkg/metrics/factory.go index ce6d85d07..c34b0d453 100644 --- a/pkg/metrics/factory.go +++ b/pkg/metrics/factory.go @@ -30,8 +30,12 @@ func (factory Factory) Observer(provider string) Interface { return &HttpObserver{ client: factory.Client, } - case provider == "appmesh", provider == "envoy": - return &EnvoyObserver{ + case provider == "appmesh": + return &AppMeshObserver{ + client: factory.Client, + } + case provider == "crossover": + return &CrossoverObserver{ client: factory.Client, } case provider == "nginx": @@ -43,7 +47,7 @@ func (factory Factory) Observer(provider string) Interface { client: factory.Client, } case provider == "appmesh:service", provider == "envoy:service": - return &EnvoyServiceObserver{ + return &CrossoverServiceObserver{ client: factory.Client, } case provider == "linkerd": From 266b957fc6efb3669cf24cd31e249b598a7b6aee Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Wed, 18 Dec 2019 22:11:21 +0900 Subject: [PATCH 13/13] Fix CrossoverServiceObserver's ID --- pkg/metrics/factory.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/metrics/factory.go b/pkg/metrics/factory.go index c34b0d453..be4f43149 100644 --- a/pkg/metrics/factory.go +++ b/pkg/metrics/factory.go @@ -46,7 +46,11 @@ func (factory Factory) Observer(provider string) Interface { return &GlooObserver{ client: factory.Client, } - case provider == "appmesh:service", provider == "envoy:service": + case provider == "smi:linkerd": + return &LinkerdObserver{ + client: factory.Client, + } + case 
provider == "crossover:service": return &CrossoverServiceObserver{ client: factory.Client, }