From be1a77255c8f307fb98fb4c15c23a376d3a6d61d Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Tue, 16 Apr 2024 15:58:31 -0700 Subject: [PATCH 1/8] ci: add a github action for linting jsonnet (#12527) Signed-off-by: Callum Styan --- .drone/drone.jsonnet | 17 - .drone/drone.yml | 34 +- .github/workflows/lint-jsonnet.yml | 32 ++ .../loki-mixin-compiled-ssd/alerts.yaml | 84 ++--- .../dashboards/loki-chunks.json | 14 +- .../dashboards/loki-logs.json | 40 ++- .../loki-mixin-recording-rules.json | 3 +- .../dashboards/loki-operational.json | 260 +++++++++++----- production/loki-mixin-compiled-ssd/rules.yaml | 90 +++--- production/loki-mixin-compiled/alerts.yaml | 84 ++--- .../dashboards/loki-chunks.json | 14 +- .../dashboards/loki-logs.json | 40 ++- .../loki-mixin-recording-rules.json | 3 +- .../dashboards/loki-operational.json | 270 +++++++++++----- production/loki-mixin-compiled/rules.yaml | 90 +++--- production/loki-mixin/.lint | 165 ++++++++++ production/loki-mixin/alerts.libsonnet | 12 +- .../dashboards/dashboard-loki-logs.json | 44 ++- .../dashboard-loki-operational.json | 290 +++++++++++++----- .../dashboards/dashboard-recording-rules.json | 1 + .../dashboards/loki-chunks.libsonnet | 14 +- production/promtail-mixin/.lint | 33 ++ production/promtail-mixin/alerts.libsonnet | 9 +- .../promtail-mixin/dashboards.libsonnet | 6 +- 24 files changed, 1132 insertions(+), 517 deletions(-) create mode 100644 .github/workflows/lint-jsonnet.yml create mode 100644 production/loki-mixin/.lint create mode 100644 production/promtail-mixin/.lint diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index 400c63bdde0dd..c9084a3eb37a6 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -450,23 +450,6 @@ local build_image_tag = '0.33.1'; }, ], }, - pipeline('mixins') { - workspace: { - base: '/src', - path: 'loki', - }, - steps: [ - make('lint-jsonnet', container=false) { - // Docker image defined at https://github.com/grafana/jsonnet-libs/tree/master/build - image: 'grafana/jsonnet-build:c8b75df', - depends_on: ['clone'], - }, - make('loki-mixin-check', container=false) { - depends_on: ['clone'], - when: onPRs + onPath('production/loki-mixin/**'), - }, - ], - }, pipeline('documentation-checks') { workspace: { base: '/src', diff --git a/.drone/drone.yml b/.drone/drone.yml index ef1f00b82835a..07d0e23b62149 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -132,38 +132,6 @@ workspace: path: loki --- kind: pipeline -name: mixins -steps: -- commands: - - make BUILD_IN_CONTAINER=false lint-jsonnet - depends_on: - - clone - environment: {} - image: grafana/jsonnet-build:c8b75df - name: lint-jsonnet -- commands: - - make BUILD_IN_CONTAINER=false loki-mixin-check - depends_on: - - clone - environment: {} - image: grafana/loki-build-image:0.33.1 - name: loki-mixin-check - when: - event: - - pull_request - paths: - - production/loki-mixin/** -trigger: - ref: - - refs/heads/main - - refs/heads/k??? - - refs/tags/v* - - refs/pull/*/head -workspace: - base: /src - path: loki ---- -kind: pipeline name: documentation-checks steps: - commands: @@ -1340,6 +1308,6 @@ kind: secret name: gpg_private_key --- kind: signature -hmac: dbc4d2b5c84e0464f24846abc8e7e73a5a937df289a0ecdb501f3bca28ebb8e3 +hmac: e0940674c7a2b5ae47c6509b0bc97dc594a054e5b881fd1962b81837d6b1dee6 ... 
diff --git a/.github/workflows/lint-jsonnet.yml b/.github/workflows/lint-jsonnet.yml
new file mode 100644
index 0000000000000..442638c6c7476
--- /dev/null
+++ b/.github/workflows/lint-jsonnet.yml
@@ -0,0 +1,32 @@
+---
+name: lint-jsonnet
+on: [pull_request]
+  # pull_request:
+  #   paths: "production/**"
+
+jobs:
+  check-mixin:
+    name: Check mixin jsonnet files
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: setup go
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.22.2'
+      - name: setup jsonnet
+        run: |
+          go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0
+          go install github.com/google/go-jsonnet/cmd/jsonnetfmt@v0.20.0
+          go install github.com/google/go-jsonnet/cmd/jsonnet-lint@v0.20.0
+          go install github.com/monitoring-mixins/mixtool/cmd/mixtool@16dc166166d91e93475b86b9355a4faed2400c18
+          go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.5.1
+      - name: run linting
+        run: make BUILD_IN_CONTAINER=false lint-jsonnet
+      - name: check compiled mixin has been updated
+        run: |
+          make BUILD_IN_CONTAINER=false loki-mixin-check
+
+
+
diff --git a/production/loki-mixin-compiled-ssd/alerts.yaml b/production/loki-mixin-compiled-ssd/alerts.yaml
index 77f285b99c060..7c0825d8580d6 100644
--- a/production/loki-mixin-compiled-ssd/alerts.yaml
+++ b/production/loki-mixin-compiled-ssd/alerts.yaml
@@ -1,41 +1,45 @@
 groups:
-- name: loki_alerts
-  rules:
-  - alert: LokiRequestErrors
-    annotations:
-      message: |
-        {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors.
-    expr: |
-      100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route)
-        /
-      sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route)
-        > 10
-    for: 15m
-    labels:
-      severity: critical
-  - alert: LokiRequestPanics
-    annotations:
-      message: |
-        {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics.
-    expr: |
-      sum(increase(loki_panic_total[10m])) by (namespace, job) > 0
-    labels:
-      severity: critical
-  - alert: LokiRequestLatency
-    annotations:
-      message: |
-        {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency.
-    expr: |
-      cluster_namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1
-    for: 15m
-    labels:
-      severity: critical
-  - alert: LokiTooManyCompactorsRunning
-    annotations:
-      message: |
-        {{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time.
-    expr: |
-      sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1
-    for: 5m
-    labels:
-      severity: warning
+  - name: loki_alerts
+    rules:
+      - alert: LokiRequestErrors
+        annotations:
+          description: |
+            {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors.
+          summary: Loki request error rate is high.
+        expr: |
+          100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route)
+            /
+          sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route)
+            > 10
+        for: 15m
+        labels:
+          severity: critical
+      - alert: LokiRequestPanics
+        annotations:
+          description: |
+            {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics.
+          summary: Loki requests are causing code panics.
+ expr: | + sum(increase(loki_panic_total[10m])) by (namespace, job) > 0 + labels: + severity: critical + - alert: LokiRequestLatency + annotations: + description: | + {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. + summary: Loki request error latency is high. + expr: | + cluster_namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1 + for: 15m + labels: + severity: critical + - alert: LokiTooManyCompactorsRunning + annotations: + description: | + {{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time. + summary: Loki deployment is running more than one compactor. + expr: | + sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1 + for: 5m + labels: + severity: warning diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json b/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json index fe9c354f4f93c..c393162f15879 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json @@ -416,7 +416,7 @@ "span": 6, "targets": [ { - "expr": "sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m]))", + "expr": "sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]))", "format": "time_series", "legendFormat": "Index Entries", "legendLink": null @@ -981,19 +981,19 @@ "span": 12, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p99", "legendLink": null }, { - "expr": "histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[1m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p90", "legendLink": null }, { - "expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[1m])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p50", "legendLink": null @@ -1052,19 +1052,19 @@ "span": 12, "targets": [ { - "expr": "histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) by (le))", + "expr": 
"histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p50", "legendLink": null }, { - "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p99", "legendLink": null }, { - "expr": "sum(rate(loki_ingester_chunk_bounds_hours_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) / sum(rate(loki_ingester_chunk_bounds_hours_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m]))", + "expr": "sum(rate(loki_ingester_chunk_bounds_hours_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) / sum(rate(loki_ingester_chunk_bounds_hours_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]))", "format": "time_series", "legendFormat": "avg", "legendLink": null diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json b/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json index 90691632b6c29..27fc922838ef6 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json @@ -114,6 +114,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -236,7 +241,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\"}[5m]))", + "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\"}[$__rate_interval]))", "refId": "A" } ], @@ -287,6 +292,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "bytes" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -373,6 +383,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -408,7 +423,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[5m]))", + "expr": "sum(rate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[$__rate_interval]))", "refId": "A" } ], @@ -459,6 +474,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -494,7 +514,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[5m]))", + "expr": "sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", 
pod=~\"$deployment.*\", pod=~\"$pod\"}[$__rate_interval]))", "refId": "A" } ], @@ -632,6 +652,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -667,7 +692,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\", exported_pod=~\"$deployment.*\", exported_pod=~\"$pod\", container=~\"$container\"}[5m])) by (level)", + "expr": "sum(rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\", exported_pod=~\"$deployment.*\", exported_pod=~\"$pod\", container=~\"$container\"}[$__rate_interval])) by (level)", "legendFormat": "{{level}}", "refId": "A" } @@ -719,6 +744,11 @@ "dashLength": 10, "dashes": false, "datasource": "$loki_datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -771,7 +801,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\" } |logfmt| level=\"$level\" |= \"$filter\" [5m])) by (level)", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\" } |logfmt| level=\"$level\" |= \"$filter\" | __error__=\"\" [$__rate_interval])) by (level)", "intervalFactor": 3, "legendFormat": "{{level}}", "refId": "A" diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json b/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json index 1234065bb6f70..f1f6c215f1756 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json @@ -300,7 +300,8 @@ "value": 80 } ] - } + }, + "unit": "s" }, "overrides": [ ] }, diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json index 5610b088dceb0..fbaf16610f64f 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json @@ -87,7 +87,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-read\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-read\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", "legendFormat": "{{status}}", "refId": "A" } @@ -183,7 +183,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n 
rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", "legendFormat": "{{status}}", "refId": "A" } @@ -278,7 +278,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant))", + "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (tenant))", "legendFormat": "{{tenant}}", "refId": "A" } @@ -332,7 +332,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "MBs" }, "overrides": [ ] }, @@ -374,7 +375,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant)) / 1024 / 1024", + "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (tenant)) / 1024 / 1024", "legendFormat": "{{tenant}}", "refId": "A" } @@ -524,7 +525,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -630,7 +632,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -834,7 +837,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1040,7 +1044,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1148,7 +1153,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1353,7 +1359,8 @@ "description": "", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1602,7 +1609,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10,sum by (tenant, reason) (rate(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])))", + "expr": "topk(10,sum by (tenant, reason) (rate(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[$__rate_interval])))", "interval": "", "legendFormat": "{{ tenant }} - {{ reason }}", "refId": "A" @@ -1727,7 +1734,7 @@ ], "targets": [ { - "expr": "topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])[$__range:1m])))", + "expr": "topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[$__rate_interval])[$__range:$__rate_interval])))", "format": "table", "instant": true, "interval": "", @@ -1852,6 +1859,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + 
}, "fill": 1, "fillGradient": 0, "gridPos": { @@ -1985,7 +1997,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/(loki|enterprise-logs)-write\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/(loki|enterprise-logs)-write\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -2153,6 +2165,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2189,7 +2206,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2242,6 +2259,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2278,7 +2300,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2331,6 +2353,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2367,7 +2394,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2565,7 +2592,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum by (tenant) (rate(loki_ingester_streams_created_total{cluster=\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\"}[1m]) > 0))", + "expr": "topk(10, sum by (tenant) (rate(loki_ingester_streams_created_total{cluster=\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\"}[$__rate_interval]) > 0))", "interval": "", "legendFormat": "{{ tenant }}", "refId": "A" @@ -2675,13 +2702,13 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\"}[1m]))", + "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\"}[$__rate_interval]))", "interval": "", "legendFormat": "Chunks", "refId": "A" }, { - "expr": "sum(increase(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"}[1m]))/sum(increase(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"}[1m])) < 1", + "expr": "sum(increase(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", 
job=~\"($namespace)/(loki|enterprise-logs)-write\"}[$__rate_interval]))/sum(increase(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"}[$__rate_interval])) < 1", "interval": "", "legendFormat": "De-Dupe Ratio", "refId": "B" @@ -2759,7 +2786,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\"}[1m])) by (le)", + "expr": "sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le)", "format": "heatmap", "instant": false, "interval": "", @@ -2914,7 +2941,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum by (le) (rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"}[1m]))", + "expr": "sum by (le) (rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"}[$__rate_interval]))", "format": "heatmap", "instant": false, "interval": "", @@ -3059,6 +3086,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3192,7 +3224,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/(loki|enterprise-logs)-read\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/(loki|enterprise-logs)-read\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -3376,6 +3408,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3415,19 +3452,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.99, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "intervalFactor": 1, "legendFormat": "{{container}}: .99-{{method}}-{{name}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.9, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "hide": false, "legendFormat": "{{container}}: .9-{{method}}-{{name}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.5, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "hide": false, "legendFormat": "{{container}}: .5-{{method}}-{{name}}", "refId": "C" @@ -3519,7 +3556,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by 
(status_code, method, name, container)", + "expr": "sum(rate(loki_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, method, name, container)", "intervalFactor": 1, "legendFormat": "{{container}}: {{status_code}}-{{method}}-{{name}}", "refId": "A" @@ -3588,6 +3625,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3627,19 +3669,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -3692,6 +3734,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3731,7 +3778,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, status_code, method)", + "expr": "sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, status_code, method)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -3800,6 +3847,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3837,17 +3889,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".9", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, 
sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "refId": "C" } ], @@ -3898,6 +3950,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3935,20 +3992,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "intervalFactor": 1, "legendFormat": "99%", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "90%", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "50%", "refId": "C" @@ -4001,6 +4058,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4038,20 +4100,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "intervalFactor": 1, "legendFormat": "99%", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": 
"histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "90%", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "50%", "refId": "C" @@ -4104,6 +4166,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4141,17 +4208,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".9", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "refId": "C" } ], @@ -4202,6 +4269,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4239,7 +4311,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4292,6 +4364,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4329,7 
+4406,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4382,6 +4459,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4419,7 +4501,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4472,6 +4554,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4509,7 +4596,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4578,6 +4665,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4617,19 +4709,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -4721,7 +4813,7 @@ "steppedLine": false, "targets": [ { - "expr": 
"sum(rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -4825,7 +4917,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -4911,7 +5003,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -4997,7 +5089,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5083,7 +5175,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5169,17 +5261,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".5", "refId": "C" } @@ -5231,6 +5323,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5269,19 +5366,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", 
namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -5372,7 +5469,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -5441,6 +5538,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5479,19 +5581,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -5582,7 +5684,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -5651,6 +5753,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5689,19 +5796,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": 
"A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -5792,7 +5899,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -5861,6 +5968,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5899,19 +6011,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6002,7 +6114,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" diff --git a/production/loki-mixin-compiled-ssd/rules.yaml b/production/loki-mixin-compiled-ssd/rules.yaml index 2a54ed4fb2e5b..5893770570f6e 100644 --- a/production/loki-mixin-compiled-ssd/rules.yaml +++ b/production/loki-mixin-compiled-ssd/rules.yaml @@ -1,53 
+1,39 @@ groups: -- name: loki_rules - rules: - - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job)) - record: cluster_job:loki_request_duration_seconds:99quantile - - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job)) - record: cluster_job:loki_request_duration_seconds:50quantile - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) / sum(rate(loki_request_duration_seconds_count[1m])) - by (cluster, job) - record: cluster_job:loki_request_duration_seconds:avg - - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job) - record: cluster_job:loki_request_duration_seconds_bucket:sum_rate - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) - record: cluster_job:loki_request_duration_seconds_sum:sum_rate - - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job) - record: cluster_job:loki_request_duration_seconds_count:sum_rate - - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job, route)) - record: cluster_job_route:loki_request_duration_seconds:99quantile - - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job, route)) - record: cluster_job_route:loki_request_duration_seconds:50quantile - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) - / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) - record: cluster_job_route:loki_request_duration_seconds:avg - - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, - route) - record: cluster_job_route:loki_request_duration_seconds_bucket:sum_rate - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) - record: cluster_job_route:loki_request_duration_seconds_sum:sum_rate - - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) - record: cluster_job_route:loki_request_duration_seconds_count:sum_rate - - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, namespace, job, route)) - record: cluster_namespace_job_route:loki_request_duration_seconds:99quantile - - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, namespace, job, route)) - record: cluster_namespace_job_route:loki_request_duration_seconds:50quantile - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, - job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, - namespace, job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds:avg - - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, - job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds_bucket:sum_rate - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, - job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds_sum:sum_rate - - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, namespace, - job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds_count:sum_rate + - name: loki_rules + rules: + - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job)) + record: cluster_job:loki_request_duration_seconds:99quantile 
+ - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job)) + record: cluster_job:loki_request_duration_seconds:50quantile + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job) + record: cluster_job:loki_request_duration_seconds:avg + - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job) + record: cluster_job:loki_request_duration_seconds_bucket:sum_rate + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) + record: cluster_job:loki_request_duration_seconds_sum:sum_rate + - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job) + record: cluster_job:loki_request_duration_seconds_count:sum_rate + - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, route)) + record: cluster_job_route:loki_request_duration_seconds:99quantile + - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, route)) + record: cluster_job_route:loki_request_duration_seconds:50quantile + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds:avg + - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds_bucket:sum_rate + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds_sum:sum_rate + - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds_count:sum_rate + - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route)) + record: cluster_namespace_job_route:loki_request_duration_seconds:99quantile + - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route)) + record: cluster_namespace_job_route:loki_request_duration_seconds:50quantile + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds:avg + - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds_bucket:sum_rate + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds_sum:sum_rate + - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds_count:sum_rate diff --git a/production/loki-mixin-compiled/alerts.yaml b/production/loki-mixin-compiled/alerts.yaml index 77f285b99c060..7c0825d8580d6 100644 --- a/production/loki-mixin-compiled/alerts.yaml +++ b/production/loki-mixin-compiled/alerts.yaml @@ -1,41 +1,45 @@ groups: -- name: loki_alerts - rules: - - alert: LokiRequestErrors - annotations: - message: | - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. 
- expr: | - 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route) - / - sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route) - > 10 - for: 15m - labels: - severity: critical - - alert: LokiRequestPanics - annotations: - message: | - {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. - expr: | - sum(increase(loki_panic_total[10m])) by (namespace, job) > 0 - labels: - severity: critical - - alert: LokiRequestLatency - annotations: - message: | - {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. - expr: | - cluster_namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1 - for: 15m - labels: - severity: critical - - alert: LokiTooManyCompactorsRunning - annotations: - message: | - {{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time. - expr: | - sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1 - for: 5m - labels: - severity: warning + - name: loki_alerts + rules: + - alert: LokiRequestErrors + annotations: + description: | + {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. + summary: Loki request error rate is high. + expr: | + 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route) + / + sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route) + > 10 + for: 15m + labels: + severity: critical + - alert: LokiRequestPanics + annotations: + description: | + {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. + summary: Loki requests are causing code panics. + expr: | + sum(increase(loki_panic_total[10m])) by (namespace, job) > 0 + labels: + severity: critical + - alert: LokiRequestLatency + annotations: + description: | + {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. + summary: Loki request error latency is high. + expr: | + cluster_namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1 + for: 15m + labels: + severity: critical + - alert: LokiTooManyCompactorsRunning + annotations: + description: | + {{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time. + summary: Loki deployment is running more than one compactor. 
+ expr: | + sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1 + for: 5m + labels: + severity: warning diff --git a/production/loki-mixin-compiled/dashboards/loki-chunks.json b/production/loki-mixin-compiled/dashboards/loki-chunks.json index b1304ffaf7538..59dd5286d45ad 100644 --- a/production/loki-mixin-compiled/dashboards/loki-chunks.json +++ b/production/loki-mixin-compiled/dashboards/loki-chunks.json @@ -416,7 +416,7 @@ "span": 6, "targets": [ { - "expr": "sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m]))", + "expr": "sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]))", "format": "time_series", "legendFormat": "Index Entries", "legendLink": null @@ -981,19 +981,19 @@ "span": 12, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p99", "legendLink": null }, { - "expr": "histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[1m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p90", "legendLink": null }, { - "expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[1m])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p50", "legendLink": null @@ -1052,19 +1052,19 @@ "span": 12, "targets": [ { - "expr": "histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) by (le))", + "expr": "histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p50", "legendLink": null }, { - "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le))", "format": "time_series", "legendFormat": "p99", "legendLink": null }, { - "expr": "sum(rate(loki_ingester_chunk_bounds_hours_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) / sum(rate(loki_ingester_chunk_bounds_hours_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m]))", + "expr": "sum(rate(loki_ingester_chunk_bounds_hours_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) / 
sum(rate(loki_ingester_chunk_bounds_hours_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]))", "format": "time_series", "legendFormat": "avg", "legendLink": null diff --git a/production/loki-mixin-compiled/dashboards/loki-logs.json b/production/loki-mixin-compiled/dashboards/loki-logs.json index 90691632b6c29..27fc922838ef6 100644 --- a/production/loki-mixin-compiled/dashboards/loki-logs.json +++ b/production/loki-mixin-compiled/dashboards/loki-logs.json @@ -114,6 +114,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -236,7 +241,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\"}[5m]))", + "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\"}[$__rate_interval]))", "refId": "A" } ], @@ -287,6 +292,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "bytes" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -373,6 +383,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -408,7 +423,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[5m]))", + "expr": "sum(rate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[$__rate_interval]))", "refId": "A" } ], @@ -459,6 +474,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -494,7 +514,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[5m]))", + "expr": "sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[$__rate_interval]))", "refId": "A" } ], @@ -632,6 +652,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -667,7 +692,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\", exported_pod=~\"$deployment.*\", exported_pod=~\"$pod\", container=~\"$container\"}[5m])) by (level)", + "expr": "sum(rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\", exported_pod=~\"$deployment.*\", exported_pod=~\"$pod\", container=~\"$container\"}[$__rate_interval])) by (level)", "legendFormat": "{{level}}", "refId": "A" } @@ -719,6 +744,11 @@ "dashLength": 10, "dashes": false, "datasource": "$loki_datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -771,7 +801,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", 
pod=~\"$pod\", container=~\"$container\" } |logfmt| level=\"$level\" |= \"$filter\" [5m])) by (level)", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\" } |logfmt| level=\"$level\" |= \"$filter\" | __error__=\"\" [$__rate_interval])) by (level)", "intervalFactor": 3, "legendFormat": "{{level}}", "refId": "A" diff --git a/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json b/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json index 1234065bb6f70..f1f6c215f1756 100644 --- a/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json +++ b/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json @@ -300,7 +300,8 @@ "value": 80 } ] - } + }, + "unit": "s" }, "overrides": [ ] }, diff --git a/production/loki-mixin-compiled/dashboards/loki-operational.json b/production/loki-mixin-compiled/dashboards/loki-operational.json index 133dbc27b51b5..394b720be16b5 100644 --- a/production/loki-mixin-compiled/dashboards/loki-operational.json +++ b/production/loki-mixin-compiled/dashboards/loki-operational.json @@ -87,7 +87,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/query-frontend\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/query-frontend\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", "legendFormat": "{{status}}", "refId": "A" } @@ -183,7 +183,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", "legendFormat": "{{status}}", "refId": "A" } @@ -237,7 +237,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "ops" }, "overrides": [ ] }, @@ -374,7 +375,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant))", + "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (tenant))", "legendFormat": "{{tenant}}", "refId": "A" } @@ -428,7 +429,8 @@ "datasource": "$datasource", "fieldConfig": { 
"defaults": { - "custom": { } + "custom": { }, + "unit": "MBs" }, "overrides": [ ] }, @@ -470,7 +472,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant)) / 1024 / 1024", + "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (tenant)) / 1024 / 1024", "legendFormat": "{{tenant}}", "refId": "A" } @@ -620,7 +622,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -726,7 +729,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -930,7 +934,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1136,7 +1141,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1244,7 +1250,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1449,7 +1456,8 @@ "description": "", "fieldConfig": { "defaults": { - "custom": { } + "custom": { }, + "unit": "s" }, "overrides": [ ] }, @@ -1698,7 +1706,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10,sum by (tenant, reason) (rate(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])))", + "expr": "topk(10,sum by (tenant, reason) (rate(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[$__rate_interval])))", "interval": "", "legendFormat": "{{ tenant }} - {{ reason }}", "refId": "A" @@ -1823,7 +1831,7 @@ ], "targets": [ { - "expr": "topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])[$__range:1m])))", + "expr": "topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[$__rate_interval])[$__range:$__rate_interval])))", "format": "table", "instant": true, "interval": "", @@ -1948,6 +1956,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2081,7 +2094,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/distributor\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/distributor\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -2249,6 +2262,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2285,7 +2303,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2338,6 +2356,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + 
"unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2374,7 +2397,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2427,6 +2450,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2463,7 +2491,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2621,6 +2649,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2754,7 +2787,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/ingester.*\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/ingester.*\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -3067,7 +3100,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum by (tenant) (rate(loki_ingester_streams_created_total{cluster=\"$cluster\",job=~\"($namespace)/ingester.*\"}[1m]) > 0))", + "expr": "topk(10, sum by (tenant) (rate(loki_ingester_streams_created_total{cluster=\"$cluster\",job=~\"($namespace)/ingester.*\"}[$__rate_interval]) > 0))", "interval": "", "legendFormat": "{{ tenant }}", "refId": "A" @@ -3177,13 +3210,13 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=~\"($namespace)/ingester.*\"}[1m]))", + "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=~\"($namespace)/ingester.*\"}[$__rate_interval]))", "interval": "", "legendFormat": "Chunks", "refId": "A" }, { - "expr": "sum(increase(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", job=~\"($namespace)/ingester.*\"}[1m]))/sum(increase(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"($namespace)/ingester.*\"}[1m])) < 1", + "expr": "sum(increase(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", job=~\"($namespace)/ingester.*\"}[$__rate_interval]))/sum(increase(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"($namespace)/ingester.*\"}[$__rate_interval])) < 1", "interval": "", "legendFormat": "De-Dupe Ratio", "refId": "B" @@ -3261,7 +3294,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\",job=~\"($namespace)/ingester.*\"}[1m])) by (le)", + "expr": "sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\",job=~\"($namespace)/ingester.*\"}[$__rate_interval])) by (le)", "format": "heatmap", "instant": false, "interval": "", @@ -3416,7 +3449,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum by (le) (rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"($namespace)/ingester.*\"}[1m]))", + "expr": "sum by (le) 
(rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"($namespace)/ingester.*\"}[$__rate_interval]))", "format": "heatmap", "instant": false, "interval": "", @@ -3561,6 +3594,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3694,7 +3732,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/querier\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=~\"($namespace)/querier\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -3878,6 +3916,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3917,19 +3960,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.99, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "intervalFactor": 1, "legendFormat": "{{container}}: .99-{{method}}-{{name}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.9, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "hide": false, "legendFormat": "{{container}}: .9-{{method}}-{{name}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.5, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "hide": false, "legendFormat": "{{container}}: .5-{{method}}-{{name}}", "refId": "C" @@ -4021,7 +4064,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, method, name, container)", + "expr": "sum(rate(loki_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, method, name, container)", "intervalFactor": 1, "legendFormat": "{{container}}: {{status_code}}-{{method}}-{{name}}", "refId": "A" @@ -4090,6 +4133,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4129,19 +4177,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": 
"histogram_quantile(.9, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -4194,6 +4242,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4233,7 +4286,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, status_code, method)", + "expr": "sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, status_code, method)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -4302,6 +4355,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4339,17 +4397,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".9", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "refId": "C" } ], @@ -4400,6 +4458,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4437,20 +4500,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", 
namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "intervalFactor": 1, "legendFormat": "99%", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "90%", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "50%", "refId": "C" @@ -4503,6 +4566,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4540,20 +4608,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "intervalFactor": 1, "legendFormat": "99%", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "90%", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "50%", "refId": "C" @@ -4606,6 +4674,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4643,17 +4716,17 @@ "steppedLine": false, 
"targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".9", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "refId": "C" } ], @@ -4704,6 +4777,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4741,7 +4819,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4794,6 +4872,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4831,7 +4914,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4884,6 +4967,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4921,7 +5009,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", 
namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4974,6 +5062,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5011,7 +5104,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -5080,6 +5173,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5119,19 +5217,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -5223,7 +5321,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -5327,7 +5425,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5413,7 +5511,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5499,7 +5597,7 @@ "steppedLine": false, "targets": [ { - "expr": 
"sum(rate(loki_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5585,7 +5683,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5671,17 +5769,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".5", "refId": "C" } @@ -5733,6 +5831,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5771,19 +5874,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -5874,7 +5977,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -5943,6 +6046,11 @@ "dashLength": 10, "dashes": false, "datasource": 
"$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5981,19 +6089,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6084,7 +6192,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -6153,6 +6261,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -6191,19 +6304,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6294,7 +6407,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": 
"sum(rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -6363,6 +6476,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -6401,19 +6519,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6504,7 +6622,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" diff --git a/production/loki-mixin-compiled/rules.yaml b/production/loki-mixin-compiled/rules.yaml index 2a54ed4fb2e5b..5893770570f6e 100644 --- a/production/loki-mixin-compiled/rules.yaml +++ b/production/loki-mixin-compiled/rules.yaml @@ -1,53 +1,39 @@ groups: -- name: loki_rules - rules: - - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job)) - record: cluster_job:loki_request_duration_seconds:99quantile - - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job)) - record: cluster_job:loki_request_duration_seconds:50quantile - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) / sum(rate(loki_request_duration_seconds_count[1m])) - by (cluster, job) - record: cluster_job:loki_request_duration_seconds:avg - - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job) - record: cluster_job:loki_request_duration_seconds_bucket:sum_rate - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) - record: cluster_job:loki_request_duration_seconds_sum:sum_rate - - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job) - record: cluster_job:loki_request_duration_seconds_count:sum_rate - - expr: histogram_quantile(0.99, 
sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job, route)) - record: cluster_job_route:loki_request_duration_seconds:99quantile - - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, job, route)) - record: cluster_job_route:loki_request_duration_seconds:50quantile - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) - / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) - record: cluster_job_route:loki_request_duration_seconds:avg - - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, - route) - record: cluster_job_route:loki_request_duration_seconds_bucket:sum_rate - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) - record: cluster_job_route:loki_request_duration_seconds_sum:sum_rate - - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) - record: cluster_job_route:loki_request_duration_seconds_count:sum_rate - - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, namespace, job, route)) - record: cluster_namespace_job_route:loki_request_duration_seconds:99quantile - - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) - by (le, cluster, namespace, job, route)) - record: cluster_namespace_job_route:loki_request_duration_seconds:50quantile - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, - job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, - namespace, job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds:avg - - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, - job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds_bucket:sum_rate - - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, - job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds_sum:sum_rate - - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, namespace, - job, route) - record: cluster_namespace_job_route:loki_request_duration_seconds_count:sum_rate + - name: loki_rules + rules: + - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job)) + record: cluster_job:loki_request_duration_seconds:99quantile + - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job)) + record: cluster_job:loki_request_duration_seconds:50quantile + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job) + record: cluster_job:loki_request_duration_seconds:avg + - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job) + record: cluster_job:loki_request_duration_seconds_bucket:sum_rate + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) + record: cluster_job:loki_request_duration_seconds_sum:sum_rate + - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job) + record: cluster_job:loki_request_duration_seconds_count:sum_rate + - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, route)) + record: cluster_job_route:loki_request_duration_seconds:99quantile + - expr: histogram_quantile(0.50, 
sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, route)) + record: cluster_job_route:loki_request_duration_seconds:50quantile + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds:avg + - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds_bucket:sum_rate + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds_sum:sum_rate + - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route) + record: cluster_job_route:loki_request_duration_seconds_count:sum_rate + - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route)) + record: cluster_namespace_job_route:loki_request_duration_seconds:99quantile + - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route)) + record: cluster_namespace_job_route:loki_request_duration_seconds:50quantile + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds:avg + - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds_bucket:sum_rate + - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds_sum:sum_rate + - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, namespace, job, route) + record: cluster_namespace_job_route:loki_request_duration_seconds_count:sum_rate diff --git a/production/loki-mixin/.lint b/production/loki-mixin/.lint new file mode 100644 index 0000000000000..108cbcc7c8581 --- /dev/null +++ b/production/loki-mixin/.lint @@ -0,0 +1,165 @@ +exclusions: + template-job-rule: + reason: "Prometheus datasource variable is being named as prometheus_datasource now while linter expects 'datasource'" + entries: + - dashboard: "Loki / Operational" + - dashboard: "Loki / Deletion" + - dashboard: "Loki / Chunks" + - dashboard: "Loki / Retention" + - dashboard: "Loki / Recording Rules" + - dashboard: "Loki / Reads" + - dashboard: "Loki / Reads Resources" + - dashboard: "Loki / Logs" + - dashboard: "Loki / Writes Resources" + - dashboard: "Loki / Writes" + template-datasource-rule: + reason: "Based on new convention we are using variable names prometheus_datasource and loki_datasource where as linter expects 'datasource'" + entries: + - dashboard: "Loki / Operational" + - dashboard: "Loki / Deletion" + - dashboard: "Loki / Chunks" + - dashboard: "Loki / Retention" + - dashboard: "Loki / Recording Rules" + - dashboard: "Loki / Reads" + - dashboard: "Loki / Reads Resources" + - dashboard: "Loki / Logs" + - dashboard: "Loki / Writes Resources" + - dashboard: "Loki / Writes" + template-instance-rule: + reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" + entries: + - dashboard: "Loki / Operational" + - dashboard: "Loki / Deletion" + - dashboard: "Loki / Chunks" + - 
dashboard: "Loki / Retention" + - dashboard: "Loki / Writes" + - dashboard: "Loki / Recording Rules" + - dashboard: "Loki / Reads" + - dashboard: "Loki / Reads Resources" + - dashboard: "Loki / Logs" + - dashboard: "Loki / Writes Resources" + target-instance-rule: + reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" + entries: + - dashboard: "Loki / Operational" + - dashboard: "Loki / Deletion" + - dashboard: "Loki / Chunks" + - dashboard: "Loki / Retention" + - dashboard: "Loki / Reads" + - dashboard: "Loki / Recording Rules" + - dashboard: "Loki / Reads Resources" + - dashboard: "Loki / Logs" + - dashboard: "Loki / Writes Resources" + - dashboard: "Loki / Writes" + target-job-rule: + reason: "We don't have/need a job template selector for this dashboard" + entries: + - dashboard: "Loki / Operational" + - dashboard: "Loki / Deletion" + - dashboard: "Loki / Chunks" + - dashboard: "Loki / Retention" + - dashboard: "Loki / Reads" + - dashboard: "Loki / Recording Rules" + - dashboard: "Loki / Reads Resources" + - dashboard: "Loki / Logs" + - dashboard: "Loki / Writes Resources" + - dashboard: "Loki / Writes" + target-promql-rule: + reason: "The following are logql queries, not promql" + entries: + - dashboard: "Loki / Operational" + panel: "Error Log Rate" + - dashboard: "Loki / Operational" + panel: "Bad Words" + - dashboard: "Loki / Logs" + panel: "Log Rate" + panel-datasource-rule: + reason: "Loki datasource variable is being named as loki_datasource now while linter expects 'datasource'" + entries: + - dashboard: + - dashboard: "Loki / Logs" + panel: "Log Rate" + target-rate-interval-rule: + reason: "This query is using an offset, the lint failure here is not for a rate interval" + entries: + - dashboard: "Loki / Operational" + panel: "Bad Words" + panel-title-description-rule: + reason: "There's not much value in requiring this for every panel" + template-on-time-change-reload-rule: + reason: "Fixing this correctly is an upstream jsonnet library change" + panel-units-rule: + reason: "Units are implied from panel title or not applicable" + entries: + - dashboard: "Loki / Operational" + panel: "Queries/Second" + - dashboard: "Loki / Operational" + panel: "Writes/Second" + - dashboard: "Loki / Operational" + panel: "Lines Per Tenant (top 10)" + - dashboard: "Loki / Recording Rules" + panel: "Appenders Not Ready" + - dashboard: "Loki / Recording Rules" + panel: "Samples Appended to WAL per Second" + - dashboard: "Loki / Recording Rules" + panel: "Series Created per Second" + - dashboard: "Loki / Recording Rules" + panel: "Samples Sent per Second" + - dashboard: "Loki / Recording Rules" + panel: "Pending Samples" + - dashboard: "Loki / Logs" + panel: "goroutines" + - dashboard: "Loki / Logs" + panel: "restarts" + - dashboard: "Loki / Logs" + panel: "cpu" + - dashboard: "Loki / Deletion" + panel: "Number of Pending Requests" + - dashboard: "Loki / Deletion" + panel: "Oldest Pending Request Age" + - dashboard: "Loki / Operational" + panel: "Pushes/Second" + - dashboard: "Loki / Operational" + panel: "Distributor Success Rate" + - dashboard: "Loki / Operational" + panel: "Status By Method" + - dashboard: "Loki / Operational" + panel: "Discarded Lines" + - dashboard: "Loki / Operational" + panel: "Error Log Rate" + - dashboard: "Loki / Operational" + panel: "Success Rate" + - dashboard: "Loki / Operational" + panel: "Append Failures By Ingester" + - dashboard: "Loki / Operational" + panel: "MBs Per Tenant" + - dashboard: "Loki / 
Operational" + panel: "Container Restarts" + - dashboard: "Loki / Operational" + panel: "CPU Usage" + - dashboard: "Loki / Operational" + panel: "Active Streams" + - dashboard: "Loki / Operational" + panel: "Streams Created/Sec" + - dashboard: "Loki / Operational" + panel: "Chunks Flushed/Sec" + - dashboard: "Loki / Operational" + panel: "Chunk Flush Reason %" + - dashboard: "Loki / Operational" + panel: "Ingester Success Rate Write" + - dashboard: "Loki / Operational" + panel: "Ingester Success Rate Read" + - dashboard: "Loki / Operational" + panel: "Querier Success Rate" + - dashboard: "Loki / Operational" + panel: "Failure Rate" + - dashboard: "Loki / Operational" + panel: "Failure Rate" + - dashboard: "Loki / Operational" + panel: "Consumed Capacity Rate" + - dashboard: "Loki / Operational" + panel: "Dropped Rate" + - dashboard: "Loki / Operational" + panel: "Query Pages" + - dashboard: "Loki / Operational" + panel: "Throttled Rate" \ No newline at end of file diff --git a/production/loki-mixin/alerts.libsonnet b/production/loki-mixin/alerts.libsonnet index 0045cc194ba3a..089ed7439da3a 100644 --- a/production/loki-mixin/alerts.libsonnet +++ b/production/loki-mixin/alerts.libsonnet @@ -17,7 +17,8 @@ severity: 'critical', }, annotations: { - message: ||| + summary: 'Loki request error rate is high.', + description: ||| {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. |||, }, @@ -31,7 +32,8 @@ severity: 'critical', }, annotations: { - message: ||| + summary: 'Loki requests are causing code panics.', + description: ||| {{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics. |||, }, @@ -46,7 +48,8 @@ severity: 'critical', }, annotations: { - message: ||| + summary: 'Loki request error latency is high.', + description: ||| {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. |||, }, @@ -61,7 +64,8 @@ severity: 'warning', }, annotations: { - message: ||| + summary: 'Loki deployment is running more than one compactor.', + description: ||| {{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time. 
|||, }, diff --git a/production/loki-mixin/dashboards/dashboard-loki-logs.json b/production/loki-mixin/dashboards/dashboard-loki-logs.json index bcb5737aab52c..6999a5389f9da 100644 --- a/production/loki-mixin/dashboards/dashboard-loki-logs.json +++ b/production/loki-mixin/dashboards/dashboard-loki-logs.json @@ -116,6 +116,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -238,7 +243,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\"}[5m]))", + "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\"}[$__rate_interval]))", "refId": "A" } ], @@ -289,6 +294,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "bytes" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -375,6 +385,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -410,7 +425,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[5m]))", + "expr": "sum(rate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[$__rate_interval]))", "refId": "A" } ], @@ -461,6 +476,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -496,7 +516,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[5m]))", + "expr": "sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\"}[$__rate_interval]))", "refId": "A" } ], @@ -634,6 +654,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -669,7 +694,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\", exported_pod=~\"$deployment.*\", exported_pod=~\"$pod\", container=~\"$container\"}[5m])) by (level)", + "expr": "sum(rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\", exported_pod=~\"$deployment.*\", exported_pod=~\"$pod\", container=~\"$container\"}[$__rate_interval])) by (level)", "legendFormat": "{{level}}", "refId": "A" } @@ -721,6 +746,11 @@ "dashLength": 10, "dashes": false, "datasource": "$loki_datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -773,7 +803,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\" } |logfmt| level=\"$level\" |= \"$filter\" [5m])) by (level)", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", 
pod=~\"$deployment.*\", pod=~\"$pod\", container=~\"$container\" } |logfmt| level=\"$level\" |= \"$filter\" | __error__=\"\" [$__rate_interval])) by (level)", "intervalFactor": 3, "legendFormat": "{{level}}", "refId": "A" @@ -862,8 +892,8 @@ "10s", "30s", "1m", - "5m", - "15m", + "$__rate_interval", + "1$__rate_interval", "30m", "1h", "2h", diff --git a/production/loki-mixin/dashboards/dashboard-loki-operational.json b/production/loki-mixin/dashboards/dashboard-loki-operational.json index 2dd944c202984..674e9ae21de5c 100644 --- a/production/loki-mixin/dashboards/dashboard-loki-operational.json +++ b/production/loki-mixin/dashboards/dashboard-loki-operational.json @@ -90,7 +90,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", "legendFormat": "{{status}}", "refId": "A" } @@ -185,7 +185,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", "legendFormat": "{{status}}", "refId": "A" } @@ -239,7 +239,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "ops" }, "overrides": [] }, @@ -374,7 +375,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant))", + "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (tenant))", "legendFormat": "{{tenant}}", "refId": "A" } @@ -428,7 +429,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "MBs" }, "overrides": [] }, @@ -469,7 +471,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant)) / 1024 / 1024", + "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (tenant)) / 1024 / 1024", "legendFormat": 
"{{tenant}}", "refId": "A" } @@ -618,7 +620,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "s" }, "overrides": [] }, @@ -723,7 +726,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "s" }, "overrides": [] }, @@ -925,7 +929,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "s" }, "overrides": [] }, @@ -1129,7 +1134,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "s" }, "overrides": [] }, @@ -1236,7 +1242,8 @@ "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "s" }, "overrides": [] }, @@ -1439,7 +1446,8 @@ "description": "", "fieldConfig": { "defaults": { - "custom": {} + "custom": {}, + "unit": "s" }, "overrides": [] }, @@ -1685,7 +1693,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10,sum by (tenant, reason) (rate(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])))", + "expr": "topk(10,sum by (tenant, reason) (rate(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[$__rate_interval])))", "interval": "", "legendFormat": "{{ tenant }} - {{ reason }}", "refId": "A" @@ -1809,7 +1817,7 @@ ], "targets": [ { - "expr": "topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])[$__range:1m])))", + "expr": "topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[$__rate_interval])[$__range:$__rate_interval])))", "format": "table", "instant": true, "interval": "", @@ -1844,6 +1852,7 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fill": 1, "fillGradient": 0, "gridPos": { @@ -1932,6 +1941,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2063,7 +2077,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -2229,6 +2243,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2264,7 +2283,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2317,6 +2336,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2352,7 +2376,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", 
namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2405,6 +2429,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2440,7 +2469,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (pod)", + "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (pod)", "intervalFactor": 1, "legendFormat": "{{pod}}", "refId": "A" @@ -2596,6 +2625,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2727,7 +2761,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -3035,7 +3069,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, sum by (tenant) (rate(loki_ingester_streams_created_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]) > 0))", + "expr": "topk(10, sum by (tenant) (rate(loki_ingester_streams_created_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[$__rate_interval]) > 0))", "interval": "", "legendFormat": "{{ tenant }}", "refId": "A" @@ -3143,13 +3177,13 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]))", + "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[$__rate_interval]))", "interval": "", "legendFormat": "Chunks", "refId": "A" }, { - "expr": "sum(increase(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", job=\"$namespace/ingester\"}[1m]))/sum(increase(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=\"$namespace/ingester\"}[1m])) < 1", + "expr": "sum(increase(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", job=\"$namespace/ingester\"}[$__rate_interval]))/sum(increase(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=\"$namespace/ingester\"}[$__rate_interval])) < 1", "interval": "", "legendFormat": "De-Dupe Ratio", "refId": "B" @@ -3226,7 +3260,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m])) by (le)", + "expr": "sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\",job=\"$namespace/ingester\"}[$__rate_interval])) by (le)", "format": "heatmap", "instant": false, "interval": "", @@ -3379,7 +3413,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum by (le) (rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=\"$namespace/ingester\"}[1m]))", + "expr": "sum by (le) (rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=\"$namespace/ingester\"}[$__rate_interval]))", "format": "heatmap", "instant": false, "interval": "", @@ -3522,6 +3556,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "binBps" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3653,7 +3692,7 @@ 
"steppedLine": false, "targets": [ { - "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"} | logfmt | level=\"error\"[1m]))", + "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"} | logfmt | level=\"error\"[$__rate_interval]))", "refId": "A" } ], @@ -3834,6 +3873,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3872,19 +3916,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.99, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "intervalFactor": 1, "legendFormat": "{{container}}: .99-{{method}}-{{name}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.9, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "hide": false, "legendFormat": "{{container}}: .9-{{method}}-{{name}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le, container))", + "expr": "histogram_quantile(.5, sum(rate(loki_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (method, name, le, container))", "hide": false, "legendFormat": "{{container}}: .5-{{method}}-{{name}}", "refId": "C" @@ -3975,7 +4019,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, method, name, container)", + "expr": "sum(rate(loki_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, method, name, container)", "intervalFactor": 1, "legendFormat": "{{container}}: {{status_code}}-{{method}}-{{name}}", "refId": "A" @@ -4043,6 +4087,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4081,19 +4130,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - 
"expr": "histogram_quantile(.5, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -4146,6 +4195,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4184,7 +4238,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, status_code, method)", + "expr": "sum(rate(loki_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, status_code, method)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -4252,6 +4306,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4288,17 +4347,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".9", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (operation, le))", "refId": "C" } ], @@ -4349,6 +4408,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4385,20 +4449,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "intervalFactor": 1, "legendFormat": "99%", "refId": "A" }, { - 
"expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "90%", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "50%", "refId": "C" @@ -4451,6 +4515,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4487,20 +4556,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "intervalFactor": 1, "legendFormat": "99%", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "90%", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (operation, le))", "interval": "", "legendFormat": "50%", "refId": "C" @@ -4553,6 +4622,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4589,17 +4663,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", 
operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".9", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (operation, le))", "refId": "C" } ], @@ -4650,6 +4724,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4686,7 +4765,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4739,6 +4818,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4775,7 +4859,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4828,6 +4912,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -4864,7 +4953,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -4917,6 +5006,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "ops" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ 
-4953,7 +5047,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (status_code)", + "expr": "sum(rate(loki_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[$__rate_interval])) by (status_code)", "intervalFactor": 1, "legendFormat": "{{status_code}}", "refId": "A" @@ -5021,6 +5115,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5059,19 +5158,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -5162,7 +5261,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -5264,7 +5363,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5349,7 +5448,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5434,7 +5533,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": "sum(rate(loki_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5519,7 +5618,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))", + "expr": 
"sum(rate(loki_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "refId": "A" } ], @@ -5604,17 +5703,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))", + "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "legendFormat": ".5", "refId": "C" } @@ -5666,6 +5765,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5703,19 +5807,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -5805,7 +5909,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -5873,6 +5977,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -5910,19 +6019,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, 
sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6012,7 +6121,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -6080,6 +6189,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -6117,19 +6231,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6219,7 +6333,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -6287,6 +6401,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + 
} + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -6324,19 +6443,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6426,7 +6545,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_cassandra_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)", + "expr": "sum(rate(loki_cassandra_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -6494,6 +6613,11 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -6531,19 +6655,19 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "intervalFactor": 1, "legendFormat": ".99-{{operation}}", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.9, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".9-{{operation}}", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))", + "expr": "histogram_quantile(.5, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (operation, le))", "hide": false, "legendFormat": ".5-{{operation}}", "refId": "C" @@ -6633,7 +6757,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, 
operation)", + "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (status_code, operation)", "intervalFactor": 1, "legendFormat": "{{status_code}}-{{operation}}", "refId": "A" @@ -6697,9 +6821,9 @@ "refresh_intervals": [ "10s", "30s", - "1m", - "5m", - "15m", + "$__rate_interval", + "$__rate_interval", + "1$__rate_interval", "30m", "1h", "2h", diff --git a/production/loki-mixin/dashboards/dashboard-recording-rules.json b/production/loki-mixin/dashboards/dashboard-recording-rules.json index 2861e8717c155..d94f131fbadf6 100644 --- a/production/loki-mixin/dashboards/dashboard-recording-rules.json +++ b/production/loki-mixin/dashboards/dashboard-recording-rules.json @@ -273,6 +273,7 @@ "color": { "mode": "palette-classic" }, + "unit": "s", "custom": { "axisLabel": "", "axisPlacement": "auto", diff --git a/production/loki-mixin/dashboards/loki-chunks.libsonnet b/production/loki-mixin/dashboards/loki-chunks.libsonnet index 87dfff7a4f064..dcb086977db0e 100644 --- a/production/loki-mixin/dashboards/loki-chunks.libsonnet +++ b/production/loki-mixin/dashboards/loki-chunks.libsonnet @@ -49,7 +49,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addPanel( $.newQueryPanel('Index Entries Per Chunk') + $.queryPanel( - 'sum(rate(loki_chunk_store_index_entries_per_chunk_sum{%s}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{%s}[5m]))' % [ + 'sum(rate(loki_chunk_store_index_entries_per_chunk_sum{%s}[$__rate_interval])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{%s}[$__rate_interval]))' % [ dashboards['loki-chunks.json'].labelsSelector, dashboards['loki-chunks.json'].labelsSelector, ], @@ -139,9 +139,9 @@ local utils = import 'mixin-utils/utils.libsonnet'; $.newQueryPanel('Chunk Size Quantiles', 'bytes') + $.queryPanel( [ - 'histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{%s}[1m])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, - 'histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{%s}[1m])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, - 'histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{%s}[1m])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, + 'histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{%s}[$__rate_interval])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, + 'histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{%s}[$__rate_interval])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, + 'histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{%s}[$__rate_interval])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, ], [ 'p99', @@ -157,9 +157,9 @@ local utils = import 'mixin-utils/utils.libsonnet'; $.newQueryPanel('Chunk Duration hours (end-start)') + $.queryPanel( [ - 'histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{%s}[5m])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, - 'histogram_quantile(0.99, sum(rate(loki_ingester_chunk_bounds_hours_bucket{%s}[5m])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, - 'sum(rate(loki_ingester_chunk_bounds_hours_sum{%s}[5m])) / sum(rate(loki_ingester_chunk_bounds_hours_count{%s}[5m]))' % [ + 'histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{%s}[$__rate_interval])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, + 'histogram_quantile(0.99, 
sum(rate(loki_ingester_chunk_bounds_hours_bucket{%s}[$__rate_interval])) by (le))' % dashboards['loki-chunks.json'].labelsSelector, + 'sum(rate(loki_ingester_chunk_bounds_hours_sum{%s}[$__rate_interval])) / sum(rate(loki_ingester_chunk_bounds_hours_count{%s}[$__rate_interval]))' % [ dashboards['loki-chunks.json'].labelsSelector, dashboards['loki-chunks.json'].labelsSelector, ], diff --git a/production/promtail-mixin/.lint b/production/promtail-mixin/.lint new file mode 100644 index 0000000000000..996e1e086b998 --- /dev/null +++ b/production/promtail-mixin/.lint @@ -0,0 +1,33 @@ +exclusions: + template-job-rule: + reason: "Prometheus datasource variable is being named as prometheus_datasource now while linter expects 'datasource'" + entries: + - dashboard: "Loki / Promtail" + panel-datasource-rule: + reason: "Loki datasource variable is being named as loki_datasource now while linter expects 'datasource'" + entries: + - dashboard: "Loki / Promtail" + template-datasource-rule: + reason: "Based on new convention we are using variable names prometheus_datasource and loki_datasource where as linter expects 'datasource'" + entries: + - dashboard: "Loki / Promtail" + template-instance-rule: + reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" + entries: + - dashboard: "Loki / Promtail" + target-instance-rule: + reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" + entries: + - dashboard: "Loki / Promtail" + target-job-rule: + reason: "We don't have/need a job template selector for this dashboard" + entries: + - dashboard: "Loki / Promtail" + panel-title-description-rule: + reason: "Panel descriptions are not very useful here" + entries: + - dashboard: "Loki / Promtail" + template-on-time-change-reload-rule: + reason: "Fixing this correctly is an upstream jsonnet library change" + panel-units-rule: + reason: "Fixing this correctly is an upstream jsonnet library change" \ No newline at end of file diff --git a/production/promtail-mixin/alerts.libsonnet b/production/promtail-mixin/alerts.libsonnet index c9e15fb323f50..41fde7a68fd9c 100644 --- a/production/promtail-mixin/alerts.libsonnet +++ b/production/promtail-mixin/alerts.libsonnet @@ -17,7 +17,8 @@ severity: 'critical', }, annotations: { - message: ||| + summary: 'Promtail request error rate is high.', + description: ||| {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors. |||, }, @@ -32,7 +33,8 @@ severity: 'critical', }, annotations: { - message: ||| + summary: 'Promtail request latency P99 is high.', + description: ||| {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency. |||, }, @@ -47,7 +49,8 @@ severity: 'warning', }, annotations: { - message: ||| + summary: 'Promtail cannot find a file it should be tailing.', + description: ||| {{ $labels.instance }} {{ $labels.job }} {{ $labels.path }} matches the glob but is not being tailed. 
|||, }, diff --git a/production/promtail-mixin/dashboards.libsonnet b/production/promtail-mixin/dashboards.libsonnet index 3413f1937c70d..e68f64e7d348f 100644 --- a/production/promtail-mixin/dashboards.libsonnet +++ b/production/promtail-mixin/dashboards.libsonnet @@ -13,7 +13,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; local dashboards = self, local labelsSelector = dashboard._config.per_cluster_label + '=~"$cluster", namespace=~"$namespace"', - local quantileLabelSelector = dashboard._config.per_cluster_label + '=~"$cluster", job=~"$namespace/promtail"', + local quantileLabelSelector = dashboard._config.per_cluster_label + '=~"$cluster", job=~"$namespace/promtail.*"', 'promtail.json': { local cfg = self, @@ -44,7 +44,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addPanel( g.panel('Bps') + g.queryPanel( - 'sum(rate(promtail_read_bytes_total{%s}[1m]))' % labelsSelector, + 'sum(rate(promtail_read_bytes_total{%s}[$__rate_interval]))' % labelsSelector, 'logs read', ) + { yaxes: g.yaxes('Bps') }, @@ -52,7 +52,7 @@ .addPanel( g.panel('Lines') + g.queryPanel( - 'sum(rate(promtail_read_lines_total{%s}[1m]))' % labelsSelector, + 'sum(rate(promtail_read_lines_total{%s}[$__rate_interval]))' % labelsSelector, 'lines read', ), ) From 2f24b670cbd79be3be8a62a12b52b8966f587998 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Wed, 17 Apr 2024 08:28:17 +0200 Subject: [PATCH 2/8] chore: Restructure `indexgateway` component package (#12630) This PR moves code for the index gateway server and client components into the `pkg/indexgateway` directory, so that the index gateway has its own top-level package like other components, such as ingester, distributor, querier, ... Signed-off-by: Christian Haudum --- .../client.go} | 52 ++++++++----------- .../client_pool.go} | 12 ++--- .../client_test.go} | 38 +++++--------- .../indexshipper => }/indexgateway/config.go | 0 .../indexshipper => }/indexgateway/gateway.go | 0 .../indexgateway/gateway_test.go | 0 .../indexshipper => }/indexgateway/grpc.go | 0 .../indexshipper => }/indexgateway/metrics.go | 0 .../indexgateway/shufflesharding.go | 0 pkg/loki/loki.go | 2 +- pkg/loki/modules.go | 2 +- pkg/loki/modules_test.go | 2 +- pkg/querier/querier.go | 16 +++--- pkg/storage/factory.go | 5 +- pkg/storage/store.go | 5 +- .../stores/shipper/indexshipper/shipper.go | 14 ++--- pkg/util/limiter/combined_limits.go | 2 +- tools/doc-generator/parse/root_blocks.go | 2 +- 18 files changed, 64 insertions(+), 88 deletions(-) rename pkg/{storage/stores/shipper/indexshipper/gatewayclient/gateway_client.go => indexgateway/client.go} (93%) rename pkg/{storage/stores/shipper/indexshipper/gatewayclient/index_gateway_grpc_pool.go => indexgateway/client_pool.go} (64%) rename pkg/{storage/stores/shipper/indexshipper/gatewayclient/gateway_client_test.go => indexgateway/client_test.go} (94%) rename pkg/{storage/stores/shipper/indexshipper => }/indexgateway/config.go (100%) rename pkg/{storage/stores/shipper/indexshipper => }/indexgateway/gateway.go (100%) rename pkg/{storage/stores/shipper/indexshipper => }/indexgateway/gateway_test.go (100%) rename pkg/{storage/stores/shipper/indexshipper => }/indexgateway/grpc.go (100%) rename pkg/{storage/stores/shipper/indexshipper => }/indexgateway/metrics.go (100%) rename pkg/{storage/stores/shipper/indexshipper => }/indexgateway/shufflesharding.go (100%) diff --git a/pkg/storage/stores/shipper/indexshipper/gatewayclient/gateway_client.go b/pkg/indexgateway/client.go
similarity index 93% rename from pkg/storage/stores/shipper/indexshipper/gatewayclient/gateway_client.go rename to pkg/indexgateway/client.go index 472f6c019e85e..2acdad06937e3 100644 --- a/pkg/storage/stores/shipper/indexshipper/gatewayclient/gateway_client.go +++ b/pkg/indexgateway/client.go @@ -1,4 +1,4 @@ -package gatewayclient +package indexgateway import ( "context" @@ -30,7 +30,6 @@ import ( "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/syntax" "github.com/grafana/loki/v3/pkg/storage/stores/series/index" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding" "github.com/grafana/loki/v3/pkg/util/constants" "github.com/grafana/loki/v3/pkg/util/discovery" @@ -42,12 +41,12 @@ const ( maxConcurrentGrpcCalls = 10 ) -// IndexGatewayClientConfig configures the Index Gateway client used to -// communicate with the Index Gateway server. -type IndexGatewayClientConfig struct { +// ClientConfig configures the Index Gateway client used to communicate with +// the Index Gateway server. +type ClientConfig struct { // Mode sets in which mode the client will operate. It is actually defined at the // index_gateway YAML section and reused here. - Mode indexgateway.Mode `yaml:"-"` + Mode Mode `yaml:"-"` // PoolConfig defines the behavior of the gRPC connection pool used to communicate // with the Index Gateway. @@ -87,39 +86,32 @@ type IndexGatewayClientConfig struct { // RegisterFlagsWithPrefix register client-specific flags with the given prefix. // // Flags that are used by both, client and server, are defined in the indexgateway package. -func (i *IndexGatewayClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { +func (i *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { i.GRPCClientConfig.RegisterFlagsWithPrefix(prefix+".grpc", f) f.StringVar(&i.Address, prefix+".server-address", "", "Hostname or IP of the Index Gateway gRPC server running in simple mode. Can also be prefixed with dns+, dnssrv+, or dnssrvnoa+ to resolve a DNS A record with multiple IP's, a DNS SRV record with a followup A record lookup, or a DNS SRV record without a followup A record lookup, respectively.") f.BoolVar(&i.LogGatewayRequests, prefix+".log-gateway-requests", false, "Whether requests sent to the gateway should be logged or not.") } -func (i *IndexGatewayClientConfig) RegisterFlags(f *flag.FlagSet) { +func (i *ClientConfig) RegisterFlags(f *flag.FlagSet) { i.RegisterFlagsWithPrefix("index-gateway-client", f) } type GatewayClient struct { - logger log.Logger - - cfg IndexGatewayClientConfig - + logger log.Logger + cfg ClientConfig storeGatewayClientRequestDuration *prometheus.HistogramVec - - dnsProvider *discovery.DNS - - pool *client.Pool - - ring ring.ReadRing - - limits indexgateway.Limits - - done chan struct{} + dnsProvider *discovery.DNS + pool *client.Pool + ring ring.ReadRing + limits Limits + done chan struct{} } // NewGatewayClient instantiates a new client used to communicate with an Index Gateway instance. // // If it is configured to be in ring mode, a pool of GRPC connections to all Index Gateway instances is created using a ring. // Otherwise, it creates a GRPC connection pool to as many addresses as can be resolved from the given address. 
-func NewGatewayClient(cfg IndexGatewayClientConfig, r prometheus.Registerer, limits indexgateway.Limits, logger log.Logger, metricsNamespace string) (*GatewayClient, error) { +func NewGatewayClient(cfg ClientConfig, r prometheus.Registerer, limits Limits, logger log.Logger, metricsNamespace string) (*GatewayClient, error) { latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: constants.Loki, Name: "index_gateway_request_duration_seconds", @@ -151,7 +143,7 @@ func NewGatewayClient(cfg IndexGatewayClientConfig, r prometheus.Registerer, lim return nil, errors.Wrap(err, "index gateway grpc dial option") } factory := func(addr string) (client.PoolClient, error) { - igPool, err := NewIndexGatewayGRPCPool(addr, dialOpts) + igPool, err := NewClientPool(addr, dialOpts) if err != nil { return nil, errors.Wrap(err, "new index gateway grpc pool") } @@ -165,7 +157,7 @@ func NewGatewayClient(cfg IndexGatewayClientConfig, r prometheus.Registerer, lim sgClient.cfg.PoolConfig.ClientCleanupPeriod = 5 * time.Second sgClient.cfg.PoolConfig.HealthCheckIngesters = true - if sgClient.cfg.Mode == indexgateway.RingMode { + if sgClient.cfg.Mode == RingMode { sgClient.pool = clientpool.NewPool("index-gateway", sgClient.cfg.PoolConfig, sgClient.ring, client.PoolAddrFunc(factory), logger, metricsNamespace) } else { // Note we don't use clientpool.NewPool because we want to provide our own discovery function @@ -380,7 +372,7 @@ func (s *GatewayClient) getShardsFromStatsFallback( return nil, errors.Wrap(err, "index gateway client get tenant ID") } - p, err := indexgateway.ExtractShardRequestMatchersAndAST(in.Query) + p, err := ExtractShardRequestMatchersAndAST(in.Query) if err != nil { return nil, errors.Wrap(err, "failure while falling back to stats for shard calculation") @@ -531,9 +523,9 @@ func (s *GatewayClient) getServerAddresses(tenantID string) ([]string, error) { // The GRPC pool we use only does discovery calls when cleaning up already existing connections, // so the list of addresses should always be provided from the external provider (ring or DNS) // and not from the RegisteredAddresses method as this list is only populated after a call to GetClientFor - if s.cfg.Mode == indexgateway.RingMode { - r := indexgateway.GetShuffleShardingSubring(s.ring, tenantID, s.limits) - rs, err := r.GetReplicationSetForOperation(indexgateway.IndexesRead) + if s.cfg.Mode == RingMode { + r := GetShuffleShardingSubring(s.ring, tenantID, s.limits) + rs, err := r.GetReplicationSetForOperation(IndexesRead) if err != nil { return nil, errors.Wrap(err, "index gateway get ring") } @@ -587,7 +579,7 @@ func (b *grpcIter) Value() []byte { return b.Rows[b.i].Value } -func instrumentation(cfg IndexGatewayClientConfig, clientRequestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { +func instrumentation(cfg ClientConfig, clientRequestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { var unaryInterceptors []grpc.UnaryClientInterceptor unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...) 
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer())) diff --git a/pkg/storage/stores/shipper/indexshipper/gatewayclient/index_gateway_grpc_pool.go b/pkg/indexgateway/client_pool.go similarity index 64% rename from pkg/storage/stores/shipper/indexshipper/gatewayclient/index_gateway_grpc_pool.go rename to pkg/indexgateway/client_pool.go index 825809a252f22..5be1d590f6c6a 100644 --- a/pkg/storage/stores/shipper/indexshipper/gatewayclient/index_gateway_grpc_pool.go +++ b/pkg/indexgateway/client_pool.go @@ -1,4 +1,4 @@ -package gatewayclient +package indexgateway import ( "io" @@ -10,25 +10,25 @@ import ( "github.com/grafana/loki/v3/pkg/logproto" ) -// IndexGatewayGRPCPool represents a pool of gRPC connections to different index gateway instances. +// ClientPool represents a pool of gRPC connections to different index gateway instances. // // Only used when Index Gateway is configured to run in ring mode. -type IndexGatewayGRPCPool struct { +type ClientPool struct { grpc_health_v1.HealthClient logproto.IndexGatewayClient io.Closer } -// NewIndexGatewayGRPCPool instantiates a new pool of IndexGateway GRPC connections. +// NewClientPool instantiates a new pool of IndexGateway GRPC connections. // // Internally, it also instantiates a protobuf index gateway client and a health client. -func NewIndexGatewayGRPCPool(address string, opts []grpc.DialOption) (*IndexGatewayGRPCPool, error) { +func NewClientPool(address string, opts []grpc.DialOption) (*ClientPool, error) { conn, err := grpc.Dial(address, opts...) if err != nil { return nil, errors.Wrap(err, "shipper new grpc pool dial") } - return &IndexGatewayGRPCPool{ + return &ClientPool{ Closer: conn, HealthClient: grpc_health_v1.NewHealthClient(conn), IndexGatewayClient: logproto.NewIndexGatewayClient(conn), diff --git a/pkg/storage/stores/shipper/indexshipper/gatewayclient/gateway_client_test.go b/pkg/indexgateway/client_test.go similarity index 94% rename from pkg/storage/stores/shipper/indexshipper/gatewayclient/gateway_client_test.go rename to pkg/indexgateway/client_test.go index 1dd1bff4abf04..03fdfbcbc1a3c 100644 --- a/pkg/storage/stores/shipper/indexshipper/gatewayclient/gateway_client_test.go +++ b/pkg/indexgateway/client_test.go @@ -1,4 +1,4 @@ -package gatewayclient +package indexgateway import ( "context" @@ -23,27 +23,15 @@ import ( "github.com/grafana/loki/v3/pkg/distributor/clientpool" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/storage/stores/series/index" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/v3/pkg/util/constants" "github.com/grafana/loki/v3/pkg/validation" ) -const ( - // query prefixes - tableNamePrefix = "table-name" - hashValuePrefix = "hash-value" - rangeValuePrefixPrefix = "range-value-prefix" - rangeValueStartPrefix = "range-value-start" - valueEqualPrefix = "value-equal" - - // response prefixes - rangeValuePrefix = "range-value" - valuePrefix = "value" - - // the number of index entries for benchmarking will be divided amongst numTables - //benchMarkNumEntries = 1000000 - //numTables = 50 -) +// const ( +// the number of index entries for benchmarking will be divided amongst numTables +// benchMarkNumEntries = 1000000 +// numTables = 50 +// ) type mockIndexGatewayServer struct { logproto.IndexGatewayServer @@ -187,9 +175,9 @@ func TestGatewayClient_RingMode(t *testing.T) { o, err := 
validation.NewOverrides(validation.Limits{IndexGatewayShardSize: s}, nil) require.NoError(t, err) - cfg := IndexGatewayClientConfig{} + cfg := ClientConfig{} flagext.DefaultValues(&cfg) - cfg.Mode = indexgateway.RingMode + cfg.Mode = RingMode cfg.Ring = igwRing c, err := NewGatewayClient(cfg, nil, o, logger, constants.Loki) @@ -218,9 +206,9 @@ func TestGatewayClient_RingMode(t *testing.T) { o, err := validation.NewOverrides(validation.Limits{IndexGatewayShardSize: s}, tl) require.NoError(t, err) - cfg := IndexGatewayClientConfig{} + cfg := ClientConfig{} flagext.DefaultValues(&cfg) - cfg.Mode = indexgateway.RingMode + cfg.Mode = RingMode cfg.Ring = igwRing c, err := NewGatewayClient(cfg, nil, o, logger, constants.Loki) @@ -247,8 +235,8 @@ func TestGatewayClient(t *testing.T) { cleanup, storeAddress := createTestGrpcServer(t) t.Cleanup(cleanup) - var cfg IndexGatewayClientConfig - cfg.Mode = indexgateway.SimpleMode + var cfg ClientConfig + cfg.Mode = SimpleMode flagext.DefaultValues(&cfg) cfg.Address = storeAddress cfg.PoolConfig = clientpool.PoolConfig{ClientCleanupPeriod: 500 * time.Millisecond} @@ -437,7 +425,7 @@ func TestDoubleRegistration(t *testing.T) { r := prometheus.NewRegistry() o, _ := validation.NewOverrides(validation.Limits{}, nil) - clientCfg := IndexGatewayClientConfig{ + clientCfg := ClientConfig{ Address: "my-store-address:1234", } diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/config.go b/pkg/indexgateway/config.go similarity index 100% rename from pkg/storage/stores/shipper/indexshipper/indexgateway/config.go rename to pkg/indexgateway/config.go diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go b/pkg/indexgateway/gateway.go similarity index 100% rename from pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go rename to pkg/indexgateway/gateway.go diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway_test.go b/pkg/indexgateway/gateway_test.go similarity index 100% rename from pkg/storage/stores/shipper/indexshipper/indexgateway/gateway_test.go rename to pkg/indexgateway/gateway_test.go diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/grpc.go b/pkg/indexgateway/grpc.go similarity index 100% rename from pkg/storage/stores/shipper/indexshipper/indexgateway/grpc.go rename to pkg/indexgateway/grpc.go diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/metrics.go b/pkg/indexgateway/metrics.go similarity index 100% rename from pkg/storage/stores/shipper/indexshipper/indexgateway/metrics.go rename to pkg/indexgateway/metrics.go diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/shufflesharding.go b/pkg/indexgateway/shufflesharding.go similarity index 100% rename from pkg/storage/stores/shipper/indexshipper/indexgateway/shufflesharding.go rename to pkg/indexgateway/shufflesharding.go diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index f933c5546c052..606d278994480 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -37,6 +37,7 @@ import ( compactorclient "github.com/grafana/loki/v3/pkg/compactor/client" "github.com/grafana/loki/v3/pkg/compactor/deletion" "github.com/grafana/loki/v3/pkg/distributor" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" ingester_client "github.com/grafana/loki/v3/pkg/ingester/client" "github.com/grafana/loki/v3/pkg/loghttp/push" @@ -58,7 +59,6 @@ import ( "github.com/grafana/loki/v3/pkg/storage/config" 
"github.com/grafana/loki/v3/pkg/storage/stores/series/index" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/v3/pkg/tracing" "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/constants" diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 882c0d40d130d..6696f76d45b13 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -45,6 +45,7 @@ import ( "github.com/grafana/loki/v3/pkg/compactor/deletion" "github.com/grafana/loki/v3/pkg/compactor/generationnumber" "github.com/grafana/loki/v3/pkg/distributor" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql" @@ -71,7 +72,6 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/boltdb" boltdbcompactor "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/boltdb/compactor" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" "github.com/grafana/loki/v3/pkg/util/constants" "github.com/grafana/loki/v3/pkg/util/httpreq" diff --git a/pkg/loki/modules_test.go b/pkg/loki/modules_test.go index b56769360c38f..8c27c851d33e0 100644 --- a/pkg/loki/modules_test.go +++ b/pkg/loki/modules_test.go @@ -13,13 +13,13 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/storage" "github.com/grafana/loki/v3/pkg/storage/chunk/client/local" "github.com/grafana/loki/v3/pkg/storage/config" bloomshipperconfig "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/boltdb" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/v3/pkg/storage/types" ) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index f4ff897e5ab85..12c68221e0d87 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -13,34 +13,32 @@ import ( "github.com/axiomhq/hyperloglog" "github.com/dustin/go-humanize" "github.com/go-kit/log" - "github.com/opentracing/opentracing-go" - "golang.org/x/exp/slices" - - logql_log "github.com/grafana/loki/v3/pkg/logql/log" - "github.com/grafana/loki/v3/pkg/logqlmodel" - "github.com/grafana/loki/v3/pkg/storage/stores/index" - "github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" - "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "golang.org/x/exp/slices" 
"golang.org/x/sync/errgroup" "google.golang.org/grpc/health/grpc_health_v1" "github.com/grafana/loki/v3/pkg/compactor/deletion" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/iter" "github.com/grafana/loki/v3/pkg/loghttp" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql" + logql_log "github.com/grafana/loki/v3/pkg/logql/log" "github.com/grafana/loki/v3/pkg/logql/syntax" + "github.com/grafana/loki/v3/pkg/logqlmodel" querier_limits "github.com/grafana/loki/v3/pkg/querier/limits" "github.com/grafana/loki/v3/pkg/querier/plan" "github.com/grafana/loki/v3/pkg/storage" + "github.com/grafana/loki/v3/pkg/storage/stores/index" + "github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume" "github.com/grafana/loki/v3/pkg/storage/stores/index/stats" listutil "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/spanlogger" diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index 5fa0ae204d40d..5932b08afe8ff 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -14,6 +14,7 @@ import ( "github.com/grafana/dskit/flagext" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/storage/chunk/cache" "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/storage/chunk/client/alibaba" @@ -36,8 +37,6 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/boltdb" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/downloads" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/gatewayclient" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/v3/pkg/storage/types" "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/constants" @@ -381,7 +380,7 @@ func NewIndexClient(periodCfg config.PeriodConfig, tableRange config.TableRange, return indexGatewayClient, nil } - gateway, err := gatewayclient.NewGatewayClient(cfg.BoltDBShipperConfig.IndexGatewayClientConfig, registerer, limits, logger, constants.Loki) + gateway, err := indexgateway.NewGatewayClient(cfg.BoltDBShipperConfig.IndexGatewayClientConfig, registerer, limits, logger, constants.Loki) if err != nil { return nil, err } diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 0d76cbeeff68a..ecf34dee8c790 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -21,6 +21,7 @@ import ( "github.com/grafana/dskit/tenant" "github.com/grafana/loki/v3/pkg/analytics" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/iter" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql" @@ -37,8 +38,6 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/series" series_index "github.com/grafana/loki/v3/pkg/storage/stores/series/index" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/gatewayclient" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" 
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/deletion" @@ -280,7 +279,7 @@ func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.Tabl if p.IndexType == types.TSDBType { if shouldUseIndexGatewayClient(s.cfg.TSDBShipperConfig) { // inject the index-gateway client into the index store - gw, err := gatewayclient.NewGatewayClient(s.cfg.TSDBShipperConfig.IndexGatewayClientConfig, indexClientReg, s.limits, indexClientLogger, s.metricsNamespace) + gw, err := indexgateway.NewGatewayClient(s.cfg.TSDBShipperConfig.IndexGatewayClientConfig, indexClientReg, s.limits, indexClientLogger, s.metricsNamespace) if err != nil { return nil, nil, nil, err } diff --git a/pkg/storage/stores/shipper/indexshipper/shipper.go b/pkg/storage/stores/shipper/indexshipper/shipper.go index 169f7eeb79fee..5b3037c45b086 100644 --- a/pkg/storage/stores/shipper/indexshipper/shipper.go +++ b/pkg/storage/stores/shipper/indexshipper/shipper.go @@ -14,11 +14,11 @@ import ( "github.com/prometheus/client_golang/prometheus" "golang.org/x/sync/errgroup" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/storage/chunk/client/util" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/downloads" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/gatewayclient" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/index" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/uploads" @@ -58,12 +58,12 @@ type IndexShipper interface { } type Config struct { - ActiveIndexDirectory string `yaml:"active_index_directory"` - CacheLocation string `yaml:"cache_location"` - CacheTTL time.Duration `yaml:"cache_ttl"` - ResyncInterval time.Duration `yaml:"resync_interval"` - QueryReadyNumDays int `yaml:"query_ready_num_days"` - IndexGatewayClientConfig gatewayclient.IndexGatewayClientConfig `yaml:"index_gateway_client"` + ActiveIndexDirectory string `yaml:"active_index_directory"` + CacheLocation string `yaml:"cache_location"` + CacheTTL time.Duration `yaml:"cache_ttl"` + ResyncInterval time.Duration `yaml:"resync_interval"` + QueryReadyNumDays int `yaml:"query_ready_num_days"` + IndexGatewayClientConfig indexgateway.ClientConfig `yaml:"index_gateway_client"` IngesterName string Mode Mode diff --git a/pkg/util/limiter/combined_limits.go b/pkg/util/limiter/combined_limits.go index b1bc467e6cac4..39684c7b43e8e 100644 --- a/pkg/util/limiter/combined_limits.go +++ b/pkg/util/limiter/combined_limits.go @@ -5,13 +5,13 @@ import ( "github.com/grafana/loki/v3/pkg/bloomgateway" "github.com/grafana/loki/v3/pkg/compactor" "github.com/grafana/loki/v3/pkg/distributor" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" querier_limits "github.com/grafana/loki/v3/pkg/querier/limits" queryrange_limits "github.com/grafana/loki/v3/pkg/querier/queryrange/limits" "github.com/grafana/loki/v3/pkg/ruler" scheduler_limits "github.com/grafana/loki/v3/pkg/scheduler/limits" "github.com/grafana/loki/v3/pkg/storage" - 
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" ) type CombinedLimits interface { diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go index 79f98dfd6777e..5699e591ad081 100644 --- a/tools/doc-generator/parse/root_blocks.go +++ b/tools/doc-generator/parse/root_blocks.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/loki/v3/pkg/bloomgateway" "github.com/grafana/loki/v3/pkg/compactor" "github.com/grafana/loki/v3/pkg/distributor" + "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" ingester_client "github.com/grafana/loki/v3/pkg/ingester/client" "github.com/grafana/loki/v3/pkg/loghttp/push" @@ -41,7 +42,6 @@ import ( "github.com/grafana/loki/v3/pkg/storage/chunk/client/openstack" storage_config "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/series/index" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/v3/pkg/tracing" "github.com/grafana/loki/v3/pkg/validation" ) From 0ee2a6126ae40a1d666f500c19efd639763f1bae Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Wed, 17 Apr 2024 09:46:10 +0200 Subject: [PATCH 3/8] fix(blooms): Fix findGaps when ownership goes to MaxUInt64 and that is covered by existing meta (#12558) --- pkg/bloomcompactor/controller.go | 14 +++++++++++--- pkg/bloomcompactor/controller_test.go | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go index 37a7c6bc69b69..e5416c7866cdd 100644 --- a/pkg/bloomcompactor/controller.go +++ b/pkg/bloomcompactor/controller.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "math" "sort" "sync" @@ -735,7 +736,11 @@ func findGaps(ownershipRange v1.FingerprintBounds, metas []v1.FingerprintBounds) searchRange := ownershipRange.Slice(leftBound, clippedMeta.Max) // update the left bound for the next iteration - leftBound = min(clippedMeta.Max+1, ownershipRange.Max+1) + // We do the max to prevent the max bound to overflow from MaxUInt64 to 0 + leftBound = min( + max(clippedMeta.Max+1, clippedMeta.Max), + max(ownershipRange.Max+1, ownershipRange.Max), + ) // since we've already ensured that the meta is within the ownership range, // we know the xor will be of length zero (when the meta is equal to the ownership range) @@ -750,8 +755,11 @@ func findGaps(ownershipRange v1.FingerprintBounds, metas []v1.FingerprintBounds) gaps = append(gaps, xors[0]) } - if leftBound <= ownershipRange.Max { - // There is a gap between the last meta and the end of the ownership range. + // If the leftBound is less than the ownership range max, and it's smaller than MaxUInt64, + // There is a gap between the last meta and the end of the ownership range. 
+ // Note: we check `leftBound < math.MaxUint64` since in the loop above we clamp the + // leftBound to MaxUint64 to prevent an overflow to 0: `max(clippedMeta.Max+1, clippedMeta.Max)` + if leftBound < math.MaxUint64 && leftBound <= ownershipRange.Max { gaps = append(gaps, v1.NewBounds(leftBound, ownershipRange.Max)) } diff --git a/pkg/bloomcompactor/controller_test.go b/pkg/bloomcompactor/controller_test.go index 2367ee3cc9566..5c6a506473476 100644 --- a/pkg/bloomcompactor/controller_test.go +++ b/pkg/bloomcompactor/controller_test.go @@ -2,6 +2,7 @@ package bloomcompactor import ( "fmt" + "math" "testing" "time" @@ -103,6 +104,27 @@ func Test_findGaps(t *testing.T) { v1.NewBounds(6, 7), }, }, + { + desc: "full ownership range with single meta", + err: false, + exp: nil, + ownershipRange: v1.NewBounds(0, math.MaxUint64), + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, math.MaxUint64), + }, + }, + { + desc: "full ownership range with multiple metas", + err: false, + exp: nil, + ownershipRange: v1.NewBounds(0, math.MaxUint64), + // Three metas covering the whole 0 - MaxUint64 + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, math.MaxUint64/3), + v1.NewBounds(math.MaxUint64/3+1, math.MaxUint64/2), + v1.NewBounds(math.MaxUint64/2+1, math.MaxUint64), + }, + }, } { t.Run(tc.desc, func(t *testing.T) { gaps, err := findGaps(tc.ownershipRange, tc.metas) From 9c25985b970865f054dfa9243cbe984d921df3c8 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 17 Apr 2024 08:30:51 -0700 Subject: [PATCH 4/8] feat(blooms): record time spent resolving shards (#12636) --- pkg/logql/metrics.go | 1 + pkg/logqlmodel/stats/context.go | 15 +- pkg/logqlmodel/stats/stats.pb.go | 217 +++++++++++++--------- pkg/logqlmodel/stats/stats.proto | 2 + pkg/querier/queryrange/codec_test.go | 3 +- pkg/querier/queryrange/prometheus_test.go | 3 +- pkg/storage/stores/index/index.go | 12 ++ pkg/util/marshal/legacy/marshal_test.go | 3 +- pkg/util/marshal/marshal_test.go | 3 +- 9 files changed, 154 insertions(+), 105 deletions(-) diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go index e9921a07c2944..ed8405fc4e6e7 100644 --- a/pkg/logql/metrics.go +++ b/pkg/logql/metrics.go @@ -203,6 +203,7 @@ func RecordRangeAndInstantQueryMetrics( "index_total_chunks", stats.Index.TotalChunks, "index_post_bloom_filter_chunks", stats.Index.PostFilterChunks, "index_bloom_filter_ratio", fmt.Sprintf("%.2f", bloomRatio), + "index_shard_resolver_duration", time.Duration(stats.Index.ShardsDuration), }...) logValues = append(logValues, tagsToKeyValues(queryTags)...) diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go index f895f7fc6c9b0..18794fb137fe8 100644 --- a/pkg/logqlmodel/stats/context.go +++ b/pkg/logqlmodel/stats/context.go @@ -116,11 +116,6 @@ func (c *Context) Caches() Caches { } } -// Index returns the index statistics accumulated so far. -func (c *Context) Index() Index { - return c.index -} - // Reset clears the statistics. func (c *Context) Reset() { c.mtx.Lock() @@ -170,15 +165,6 @@ func JoinIngesters(ctx context.Context, inc Ingester) { stats.ingester.Merge(inc) } -// JoinIndex joins the index statistics in a concurrency-safe manner. -func JoinIndex(ctx context.Context, index Index) { - stats := FromContext(ctx) - stats.mtx.Lock() - defer stats.mtx.Unlock() - - stats.index.Merge(index) -} - // ComputeSummary compute the summary of the statistics. 
func (r *Result) ComputeSummary(execTime time.Duration, queueTime time.Duration, totalEntriesReturned int) { r.Summary.TotalBytesProcessed = r.Querier.Store.Chunk.DecompressedBytes + r.Querier.Store.Chunk.HeadChunkBytes + @@ -247,6 +233,7 @@ func (i *Ingester) Merge(m Ingester) { func (i *Index) Merge(m Index) { i.TotalChunks += m.TotalChunks i.PostFilterChunks += m.PostFilterChunks + i.ShardsDuration += m.ShardsDuration } func (c *Caches) Merge(m Caches) { diff --git a/pkg/logqlmodel/stats/stats.pb.go b/pkg/logqlmodel/stats/stats.pb.go index 9a728c1612671..432cd55a7703d 100644 --- a/pkg/logqlmodel/stats/stats.pb.go +++ b/pkg/logqlmodel/stats/stats.pb.go @@ -358,6 +358,8 @@ type Index struct { TotalChunks int64 `protobuf:"varint,1,opt,name=totalChunks,proto3" json:"totalChunks"` // Post-filtered chunks PostFilterChunks int64 `protobuf:"varint,2,opt,name=postFilterChunks,proto3" json:"postFilterChunks"` + // Nanosecond duration spent fetching shards + ShardsDuration int64 `protobuf:"varint,3,opt,name=shardsDuration,proto3" json:"shardsDuration"` } func (m *Index) Reset() { *m = Index{} } @@ -406,6 +408,13 @@ func (m *Index) GetPostFilterChunks() int64 { return 0 } +func (m *Index) GetShardsDuration() int64 { + if m != nil { + return m.ShardsDuration + } + return 0 +} + type Querier struct { Store Store `protobuf:"bytes,1,opt,name=store,proto3" json:"store"` } @@ -864,92 +873,94 @@ func init() { func init() { proto.RegisterFile("pkg/logqlmodel/stats/stats.proto", fileDescriptor_6cdfe5d2aea33ebb) } var fileDescriptor_6cdfe5d2aea33ebb = []byte{ - // 1358 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0xcd, 0x6f, 0xdc, 0x44, - 0x14, 0xdf, 0xcd, 0xd6, 0x9b, 0x74, 0xf2, 0xd5, 0x4e, 0x52, 0xba, 0xa5, 0x95, 0x1d, 0x16, 0x2a, - 0x8a, 0x90, 0xb2, 0x2a, 0x45, 0x42, 0x20, 0x2a, 0x21, 0xa7, 0x44, 0xaa, 0x94, 0x8a, 0xf2, 0x16, - 0x04, 0x82, 0x93, 0x63, 0xbf, 0xec, 0x5a, 0xf5, 0xda, 0x8e, 0x3d, 0x0e, 0x8d, 0x84, 0x04, 0x7f, - 0x02, 0x77, 0xee, 0x88, 0x0b, 0x27, 0x4e, 0x9c, 0xb9, 0xf4, 0xd8, 0x63, 0x4f, 0x16, 0xdd, 0x5c, - 0x90, 0x4f, 0xfd, 0x03, 0x38, 0xa0, 0xf9, 0x58, 0x7f, 0xad, 0x37, 0xcd, 0x65, 0x3d, 0xef, 0xf7, - 0xde, 0xef, 0xcd, 0xcc, 0x9b, 0x79, 0xef, 0xcd, 0x92, 0x9d, 0xf0, 0xc9, 0x68, 0xe0, 0x05, 0xa3, - 0x63, 0x6f, 0x12, 0x38, 0xe8, 0x0d, 0x62, 0x66, 0xb1, 0x58, 0xfe, 0xee, 0x86, 0x51, 0xc0, 0x02, - 0xaa, 0x09, 0xe1, 0xcd, 0xed, 0x51, 0x30, 0x0a, 0x04, 0x32, 0xe0, 0x23, 0xa9, 0xec, 0xff, 0xb6, - 0x44, 0xba, 0x80, 0x71, 0xe2, 0x31, 0xfa, 0x31, 0x59, 0x8e, 0x93, 0xc9, 0xc4, 0x8a, 0x4e, 0x7b, - 0xed, 0x9d, 0xf6, 0x9d, 0xd5, 0x0f, 0x36, 0x76, 0xa5, 0x9b, 0xa1, 0x44, 0xcd, 0xcd, 0x67, 0xa9, - 0xd1, 0xca, 0x52, 0x63, 0x66, 0x06, 0xb3, 0x01, 0xa7, 0x1e, 0x27, 0x18, 0xb9, 0x18, 0xf5, 0x96, - 0x2a, 0xd4, 0x2f, 0x25, 0x5a, 0x50, 0x95, 0x19, 0xcc, 0x06, 0xf4, 0x3e, 0x59, 0x71, 0xfd, 0x11, - 0xc6, 0x0c, 0xa3, 0x5e, 0x47, 0x70, 0x37, 0x15, 0xf7, 0xa1, 0x82, 0xcd, 0x2b, 0x8a, 0x9c, 0x1b, - 0x42, 0x3e, 0xa2, 0x1f, 0x92, 0xae, 0x6d, 0xd9, 0x63, 0x8c, 0x7b, 0x97, 0x04, 0x79, 0x5d, 0x91, - 0xf7, 0x04, 0x68, 0xae, 0x2b, 0xaa, 0x26, 0x8c, 0x40, 0xd9, 0xd2, 0xbb, 0x44, 0x73, 0x7d, 0x07, - 0x9f, 0xf6, 0x34, 0x41, 0x5a, 0xcb, 0x67, 0x74, 0xf0, 0x69, 0xc1, 0x11, 0x26, 0x20, 0x3f, 0xfd, - 0x5f, 0x2f, 0x91, 0xee, 0x5e, 0xce, 0xb6, 0xc7, 0x89, 0xff, 0x44, 0x85, 0x69, 0xad, 0x3c, 0x65, - 0x69, 0x46, 0x6e, 0x02, 0xf2, 0x53, 0x4c, 0xb8, 0x74, 0x1e, 0xa5, 0x3c, 0x21, 0xdf, 0x59, 0x24, - 0x0e, 0x46, 0x85, 0xa5, 0xca, 0xd9, 0x50, 0x1c, 0x65, 0x03, 0xea, 0x4b, 
0xf7, 0xc8, 0xaa, 0x30, - 0x93, 0x67, 0xaa, 0x82, 0x52, 0xa5, 0x6e, 0x29, 0x6a, 0xd9, 0x10, 0xca, 0x02, 0xdd, 0x27, 0x6b, - 0x27, 0x81, 0x97, 0x4c, 0x50, 0x79, 0xd1, 0x1a, 0xbc, 0x6c, 0x2b, 0x2f, 0x15, 0x4b, 0xa8, 0x48, - 0xdc, 0x4f, 0xcc, 0x4f, 0x79, 0xb6, 0x9a, 0xee, 0x79, 0x7e, 0xca, 0x96, 0x50, 0x91, 0xf8, 0xa6, - 0x3c, 0xeb, 0x10, 0x3d, 0xe5, 0x66, 0xf9, 0xbc, 0x4d, 0x95, 0x0c, 0xa1, 0x2c, 0xd0, 0xef, 0xc9, - 0x96, 0xeb, 0xc7, 0xcc, 0xf2, 0xd9, 0x23, 0x64, 0x91, 0x6b, 0x2b, 0x67, 0x2b, 0x0d, 0xce, 0x6e, - 0x2a, 0x67, 0x4d, 0x04, 0x68, 0x02, 0xfb, 0x7f, 0x75, 0xc9, 0xb2, 0x4a, 0x13, 0xfa, 0x35, 0xb9, - 0x7e, 0x78, 0xca, 0x30, 0x7e, 0x1c, 0x05, 0x36, 0xc6, 0x31, 0x3a, 0x8f, 0x31, 0x1a, 0xa2, 0x1d, - 0xf8, 0x8e, 0xb8, 0x30, 0x1d, 0xf3, 0x66, 0x96, 0x1a, 0x8b, 0x4c, 0x60, 0x91, 0x82, 0xbb, 0xf5, - 0x5c, 0xbf, 0xd1, 0xed, 0x52, 0xe1, 0x76, 0x81, 0x09, 0x2c, 0x52, 0xd0, 0x87, 0x64, 0x8b, 0x05, - 0xcc, 0xf2, 0xcc, 0xca, 0xb4, 0xe2, 0xce, 0x75, 0xcc, 0xeb, 0x3c, 0x08, 0x0d, 0x6a, 0x68, 0x02, - 0x73, 0x57, 0x07, 0x95, 0xa9, 0xc4, 0x1d, 0x2c, 0xbb, 0xaa, 0xaa, 0xa1, 0x09, 0xa4, 0x77, 0xc8, - 0x0a, 0x3e, 0x45, 0xfb, 0x2b, 0x77, 0x82, 0xe2, 0xf6, 0xb5, 0xcd, 0x35, 0x5e, 0x00, 0x66, 0x18, - 0xe4, 0x23, 0xfa, 0x3e, 0xb9, 0x7c, 0x9c, 0x60, 0x82, 0xc2, 0xb4, 0x2b, 0x4c, 0xd7, 0xb3, 0xd4, - 0x28, 0x40, 0x28, 0x86, 0x74, 0x97, 0x90, 0x38, 0x39, 0x94, 0xa5, 0x27, 0x16, 0xf7, 0xa8, 0x63, - 0x6e, 0x64, 0xa9, 0x51, 0x42, 0xa1, 0x34, 0xa6, 0x07, 0x64, 0x5b, 0xac, 0xee, 0x73, 0x9f, 0xc9, - 0xeb, 0xc8, 0x92, 0xc8, 0x47, 0x47, 0x5c, 0x9a, 0x8e, 0xd9, 0xcb, 0x52, 0xa3, 0x51, 0x0f, 0x8d, - 0x28, 0xed, 0x93, 0x6e, 0x1c, 0x7a, 0x2e, 0x8b, 0x7b, 0x97, 0x05, 0x9f, 0xf0, 0xfc, 0x95, 0x08, - 0xa8, 0xaf, 0xb0, 0x19, 0x5b, 0x91, 0x13, 0xf7, 0x48, 0xc9, 0x46, 0x20, 0xa0, 0xbe, 0xf9, 0xaa, - 0x1e, 0x07, 0x31, 0xdb, 0x77, 0x3d, 0x86, 0x91, 0x88, 0x5e, 0x6f, 0xb5, 0xb6, 0xaa, 0x9a, 0x1e, - 0x1a, 0x51, 0xfa, 0x13, 0xb9, 0x2d, 0xf0, 0x21, 0x8b, 0x12, 0x9b, 0x25, 0x11, 0x3a, 0x8f, 0x90, - 0x59, 0x8e, 0xc5, 0xac, 0xda, 0x95, 0x58, 0x13, 0xee, 0xdf, 0xcb, 0x52, 0xe3, 0x62, 0x04, 0xb8, - 0x98, 0x59, 0xff, 0x47, 0xa2, 0x89, 0xc2, 0x4b, 0xef, 0x92, 0x55, 0xc1, 0xd8, 0xe3, 0x25, 0x33, - 0x56, 0xc9, 0xb2, 0xc9, 0x93, 0xba, 0x04, 0x43, 0x59, 0xa0, 0x9f, 0x91, 0x2b, 0x61, 0xbe, 0x1f, - 0xc5, 0x93, 0xd9, 0xb0, 0x9d, 0xa5, 0xc6, 0x9c, 0x0e, 0xe6, 0x90, 0xfe, 0xa7, 0x64, 0x59, 0x35, - 0x29, 0x5e, 0xa4, 0x63, 0x16, 0x44, 0x58, 0xab, 0xeb, 0x43, 0x8e, 0x15, 0x45, 0x5a, 0x98, 0x80, - 0xfc, 0xf4, 0xff, 0x58, 0x22, 0x2b, 0x0f, 0x8b, 0x5e, 0xb4, 0x26, 0xd6, 0x06, 0xc8, 0xab, 0x88, - 0xcc, 0x76, 0xcd, 0xbc, 0xc2, 0x8b, 0x5b, 0x19, 0x87, 0x8a, 0x44, 0xf7, 0x09, 0x2d, 0xed, 0xe8, - 0x91, 0xc5, 0x04, 0x57, 0x6e, 0xe2, 0x8d, 0x2c, 0x35, 0x1a, 0xb4, 0xd0, 0x80, 0xe5, 0xb3, 0x9b, - 0x42, 0x8e, 0x55, 0x06, 0x17, 0xb3, 0x2b, 0x1c, 0x2a, 0x12, 0xfd, 0x84, 0x6c, 0x14, 0xf9, 0x37, - 0x44, 0x9f, 0xa9, 0x74, 0xa5, 0x59, 0x6a, 0xd4, 0x34, 0x50, 0x93, 0x8b, 0x78, 0x69, 0x17, 0x8e, - 0xd7, 0x7f, 0x97, 0x88, 0x26, 0xf4, 0xf9, 0xc4, 0xea, 0x60, 0xf0, 0x48, 0x9d, 0x77, 0x31, 0x71, - 0xae, 0x81, 0x9a, 0x4c, 0xbf, 0x20, 0xd7, 0x4a, 0xc8, 0x83, 0xe0, 0x07, 0xdf, 0x0b, 0x2c, 0x27, - 0x8f, 0xda, 0x8d, 0x2c, 0x35, 0x9a, 0x0d, 0xa0, 0x19, 0xe6, 0x67, 0x60, 0x57, 0x30, 0x51, 0x4d, - 0x3a, 0xc5, 0x19, 0xcc, 0x6b, 0xa1, 0x01, 0xa3, 0x36, 0xb9, 0xc1, 0x4b, 0xc7, 0x29, 0xe0, 0x11, - 0x46, 0xe8, 0xdb, 0xe8, 0x14, 0xb7, 0xbf, 0xb7, 0xbe, 0xd3, 0xbe, 0xb3, 0x62, 0xde, 0xce, 0x52, - 0xe3, 0xad, 0x85, 0x46, 0xb3, 0x14, 0x81, 0xc5, 0x7e, 0x8a, 0xe7, 0x47, 0xad, 0xb9, 0x73, 0x6c, - 
0xc1, 0xf3, 0x63, 0xb6, 0x3f, 0xc0, 0xa3, 0x78, 0x1f, 0x99, 0x3d, 0xce, 0x0b, 0x6b, 0x79, 0x7f, - 0x15, 0x2d, 0x34, 0x60, 0xf4, 0x5b, 0xd2, 0xb3, 0x03, 0x71, 0xdd, 0xdd, 0xc0, 0xdf, 0x0b, 0x7c, - 0x16, 0x05, 0xde, 0x81, 0xc5, 0xd0, 0xb7, 0x4f, 0x45, 0xed, 0xed, 0x98, 0xb7, 0xb2, 0xd4, 0x58, - 0x68, 0x03, 0x0b, 0x35, 0xd4, 0x21, 0xb7, 0x42, 0x37, 0x44, 0xde, 0xa5, 0xbe, 0x89, 0xac, 0x30, - 0xc4, 0x48, 0x66, 0x29, 0x3a, 0xb2, 0xb6, 0xc9, 0x5a, 0xbd, 0x93, 0xa5, 0xc6, 0xb9, 0x76, 0x70, - 0xae, 0xb6, 0xff, 0xa7, 0x46, 0x34, 0x11, 0x27, 0x7e, 0xfd, 0xc6, 0x68, 0x39, 0x32, 0x68, 0xbc, - 0x1e, 0x95, 0xef, 0x7d, 0x55, 0x03, 0x35, 0xb9, 0xc2, 0x95, 0xab, 0xd3, 0x1a, 0xb8, 0x72, 0x3d, - 0x35, 0x99, 0xee, 0x91, 0xab, 0x0e, 0xda, 0xc1, 0x24, 0x8c, 0x44, 0xf1, 0x93, 0x53, 0xcb, 0xd0, - 0x5d, 0xcb, 0x52, 0x63, 0x5e, 0x09, 0xf3, 0x50, 0xdd, 0x49, 0x39, 0x42, 0x73, 0x4e, 0xe4, 0x32, - 0xe6, 0x21, 0x7a, 0x9f, 0x6c, 0xd6, 0xd7, 0x21, 0xdb, 0xda, 0x56, 0x96, 0x1a, 0x75, 0x15, 0xd4, - 0x01, 0x4e, 0x17, 0xb9, 0xf4, 0x20, 0x09, 0x3d, 0xd7, 0xb6, 0x38, 0xfd, 0x72, 0x41, 0xaf, 0xa9, - 0xa0, 0x0e, 0x70, 0x7a, 0x58, 0x6b, 0x5f, 0xa4, 0xa0, 0xd7, 0x54, 0x50, 0x07, 0x68, 0x48, 0x76, - 0xf2, 0xc0, 0x2e, 0x68, 0x30, 0xaa, 0x1d, 0xbe, 0x93, 0xa5, 0xc6, 0x6b, 0x6d, 0xe1, 0xb5, 0x16, - 0xf4, 0x94, 0xbc, 0x5d, 0x8e, 0xe1, 0xa2, 0x49, 0x65, 0x93, 0x7c, 0x37, 0x4b, 0x8d, 0x8b, 0x98, - 0xc3, 0x45, 0x8c, 0xfa, 0x7f, 0x77, 0x88, 0x26, 0x1e, 0xa6, 0xbc, 0xc6, 0xa3, 0x7c, 0x54, 0xec, - 0x07, 0x89, 0x5f, 0xe9, 0x30, 0x65, 0x1c, 0x2a, 0x12, 0x6f, 0x92, 0x38, 0x7b, 0x8a, 0x1c, 0x27, - 0xbc, 0x57, 0xc9, 0x4a, 0xa9, 0xc9, 0x26, 0x59, 0xd7, 0xc1, 0x1c, 0x42, 0x3f, 0x22, 0xeb, 0x0a, - 0x13, 0xc5, 0x5b, 0x3e, 0x0f, 0x35, 0xf3, 0x6a, 0x96, 0x1a, 0x55, 0x05, 0x54, 0x45, 0x4e, 0x14, - 0xef, 0x59, 0x40, 0x1b, 0xdd, 0x93, 0xfc, 0x31, 0x28, 0x88, 0x15, 0x05, 0x54, 0x45, 0xfe, 0xac, - 0x13, 0x80, 0x68, 0x49, 0x32, 0xbd, 0xc4, 0xb3, 0x2e, 0x07, 0xa1, 0x18, 0xf2, 0xd7, 0x62, 0x24, - 0xd7, 0x2a, 0x73, 0x49, 0x93, 0xaf, 0xc5, 0x19, 0x06, 0xf9, 0x88, 0x07, 0xd0, 0x29, 0x97, 0xf8, - 0xe5, 0xa2, 0x49, 0x96, 0x71, 0xa8, 0x48, 0x3c, 0xdf, 0x44, 0x39, 0x3e, 0x40, 0x7f, 0xc4, 0xc6, - 0x43, 0x8c, 0x4e, 0xf2, 0x37, 0xa0, 0xc8, 0xb7, 0x39, 0x25, 0xcc, 0x43, 0x26, 0x3e, 0x7f, 0xa9, - 0xb7, 0x5e, 0xbc, 0xd4, 0x5b, 0xaf, 0x5e, 0xea, 0xed, 0x9f, 0xa7, 0x7a, 0xfb, 0xf7, 0xa9, 0xde, - 0x7e, 0x36, 0xd5, 0xdb, 0xcf, 0xa7, 0x7a, 0xfb, 0x9f, 0xa9, 0xde, 0xfe, 0x77, 0xaa, 0xb7, 0x5e, - 0x4d, 0xf5, 0xf6, 0x2f, 0x67, 0x7a, 0xeb, 0xf9, 0x99, 0xde, 0x7a, 0x71, 0xa6, 0xb7, 0xbe, 0x1b, - 0x8c, 0x5c, 0x36, 0x4e, 0x0e, 0x77, 0xed, 0x60, 0x32, 0x18, 0x45, 0xd6, 0x91, 0xe5, 0x5b, 0x03, - 0x2f, 0x78, 0xe2, 0x0e, 0x4e, 0xee, 0x0d, 0x9a, 0xfe, 0xf9, 0x1f, 0x76, 0xc5, 0xff, 0xfa, 0x7b, - 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x19, 0x80, 0x75, 0xde, 0x18, 0x10, 0x00, 0x00, + // 1378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x58, 0xcf, 0x6f, 0xdc, 0x44, + 0x14, 0xde, 0xcd, 0xd6, 0x9b, 0x74, 0xf2, 0xab, 0x9d, 0xa4, 0x74, 0x4b, 0x2b, 0x3b, 0x2c, 0x54, + 0x14, 0x21, 0x65, 0x55, 0x8a, 0x84, 0x40, 0x54, 0x42, 0x4e, 0x89, 0x54, 0x29, 0x15, 0xe5, 0x2d, + 0x08, 0x04, 0x27, 0xc7, 0x9e, 0xec, 0x5a, 0xf5, 0xda, 0x8e, 0x3d, 0x0e, 0xcd, 0x09, 0xfe, 0x04, + 0xee, 0xdc, 0x11, 0x17, 0x4e, 0x5c, 0xe0, 0xcc, 0xa5, 0xc7, 0x1e, 0x7b, 0xb2, 0xe8, 0xe6, 0x82, + 0x7c, 0xea, 0x1f, 0xc0, 0x01, 0xcd, 0x9b, 0x59, 0xff, 0x5a, 0x6f, 0x9a, 0xcb, 0x7a, 0xde, 0xf7, + 0xbe, 0xef, 0xcd, 0xf8, 0x79, 0xe6, 0xbd, 0xd1, 0x92, 0x9d, 0xf0, 0xc9, 0x68, 0xe0, 
0x05, 0xa3, + 0x63, 0x6f, 0x12, 0x38, 0xcc, 0x1b, 0xc4, 0xdc, 0xe2, 0xb1, 0xfc, 0xdd, 0x0d, 0xa3, 0x80, 0x07, + 0x54, 0x43, 0xe3, 0xcd, 0xed, 0x51, 0x30, 0x0a, 0x10, 0x19, 0x88, 0x91, 0x74, 0xf6, 0x7f, 0x5d, + 0x22, 0x5d, 0x60, 0x71, 0xe2, 0x71, 0xfa, 0x31, 0x59, 0x8e, 0x93, 0xc9, 0xc4, 0x8a, 0x4e, 0x7b, + 0xed, 0x9d, 0xf6, 0x9d, 0xd5, 0x0f, 0x36, 0x76, 0x65, 0x98, 0xa1, 0x44, 0xcd, 0xcd, 0x67, 0xa9, + 0xd1, 0xca, 0x52, 0x63, 0x46, 0x83, 0xd9, 0x40, 0x48, 0x8f, 0x13, 0x16, 0xb9, 0x2c, 0xea, 0x2d, + 0x55, 0xa4, 0x5f, 0x4a, 0xb4, 0x90, 0x2a, 0x1a, 0xcc, 0x06, 0xf4, 0x3e, 0x59, 0x71, 0xfd, 0x11, + 0x8b, 0x39, 0x8b, 0x7a, 0x1d, 0xd4, 0x6e, 0x2a, 0xed, 0x43, 0x05, 0x9b, 0x57, 0x94, 0x38, 0x27, + 0x42, 0x3e, 0xa2, 0x1f, 0x92, 0xae, 0x6d, 0xd9, 0x63, 0x16, 0xf7, 0x2e, 0xa1, 0x78, 0x5d, 0x89, + 0xf7, 0x10, 0x34, 0xd7, 0x95, 0x54, 0x43, 0x12, 0x28, 0x2e, 0xbd, 0x4b, 0x34, 0xd7, 0x77, 0xd8, + 0xd3, 0x9e, 0x86, 0xa2, 0xb5, 0x7c, 0x46, 0x87, 0x3d, 0x2d, 0x34, 0x48, 0x01, 0xf9, 0xe8, 0xff, + 0x72, 0x89, 0x74, 0xf7, 0x72, 0xb5, 0x3d, 0x4e, 0xfc, 0x27, 0x2a, 0x4d, 0x6b, 0xe5, 0x29, 0x4b, + 0x33, 0x0a, 0x0a, 0xc8, 0x47, 0x31, 0xe1, 0xd2, 0x79, 0x92, 0xf2, 0x84, 0xe2, 0xcd, 0x22, 0xfc, + 0x30, 0x2a, 0x2d, 0x55, 0xcd, 0x86, 0xd2, 0x28, 0x0e, 0xa8, 0x27, 0xdd, 0x23, 0xab, 0x48, 0x93, + 0xdf, 0x54, 0x25, 0xa5, 0x2a, 0xdd, 0x52, 0xd2, 0x32, 0x11, 0xca, 0x06, 0xdd, 0x27, 0x6b, 0x27, + 0x81, 0x97, 0x4c, 0x98, 0x8a, 0xa2, 0x35, 0x44, 0xd9, 0x56, 0x51, 0x2a, 0x4c, 0xa8, 0x58, 0x22, + 0x4e, 0x2c, 0xbe, 0xf2, 0x6c, 0x35, 0xdd, 0xf3, 0xe2, 0x94, 0x99, 0x50, 0xb1, 0xc4, 0x4b, 0x79, + 0xd6, 0x21, 0xf3, 0x54, 0x98, 0xe5, 0xf3, 0x5e, 0xaa, 0x44, 0x84, 0xb2, 0x41, 0xbf, 0x27, 0x5b, + 0xae, 0x1f, 0x73, 0xcb, 0xe7, 0x8f, 0x18, 0x8f, 0x5c, 0x5b, 0x05, 0x5b, 0x69, 0x08, 0x76, 0x53, + 0x05, 0x6b, 0x12, 0x40, 0x13, 0xd8, 0xff, 0xab, 0x4b, 0x96, 0xd5, 0x31, 0xa1, 0x5f, 0x93, 0xeb, + 0x87, 0xa7, 0x9c, 0xc5, 0x8f, 0xa3, 0xc0, 0x66, 0x71, 0xcc, 0x9c, 0xc7, 0x2c, 0x1a, 0x32, 0x3b, + 0xf0, 0x1d, 0xdc, 0x30, 0x1d, 0xf3, 0x66, 0x96, 0x1a, 0x8b, 0x28, 0xb0, 0xc8, 0x21, 0xc2, 0x7a, + 0xae, 0xdf, 0x18, 0x76, 0xa9, 0x08, 0xbb, 0x80, 0x02, 0x8b, 0x1c, 0xf4, 0x21, 0xd9, 0xe2, 0x01, + 0xb7, 0x3c, 0xb3, 0x32, 0x2d, 0xee, 0xb9, 0x8e, 0x79, 0x5d, 0x24, 0xa1, 0xc1, 0x0d, 0x4d, 0x60, + 0x1e, 0xea, 0xa0, 0x32, 0x15, 0xee, 0xc1, 0x72, 0xa8, 0xaa, 0x1b, 0x9a, 0x40, 0x7a, 0x87, 0xac, + 0xb0, 0xa7, 0xcc, 0xfe, 0xca, 0x9d, 0x30, 0xdc, 0x7d, 0x6d, 0x73, 0x4d, 0x14, 0x80, 0x19, 0x06, + 0xf9, 0x88, 0xbe, 0x4f, 0x2e, 0x1f, 0x27, 0x2c, 0x61, 0x48, 0xed, 0x22, 0x75, 0x3d, 0x4b, 0x8d, + 0x02, 0x84, 0x62, 0x48, 0x77, 0x09, 0x89, 0x93, 0x43, 0x59, 0x7a, 0x62, 0xdc, 0x47, 0x1d, 0x73, + 0x23, 0x4b, 0x8d, 0x12, 0x0a, 0xa5, 0x31, 0x3d, 0x20, 0xdb, 0xb8, 0xba, 0xcf, 0x7d, 0x2e, 0xb7, + 0x23, 0x4f, 0x22, 0x9f, 0x39, 0xb8, 0x69, 0x3a, 0x66, 0x2f, 0x4b, 0x8d, 0x46, 0x3f, 0x34, 0xa2, + 0xb4, 0x4f, 0xba, 0x71, 0xe8, 0xb9, 0x3c, 0xee, 0x5d, 0x46, 0x3d, 0x11, 0xe7, 0x57, 0x22, 0xa0, + 0x9e, 0xc8, 0x19, 0x5b, 0x91, 0x13, 0xf7, 0x48, 0x89, 0x83, 0x08, 0xa8, 0x67, 0xbe, 0xaa, 0xc7, + 0x41, 0xcc, 0xf7, 0x5d, 0x8f, 0xb3, 0x08, 0xb3, 0xd7, 0x5b, 0xad, 0xad, 0xaa, 0xe6, 0x87, 0x46, + 0x94, 0xfe, 0x48, 0x6e, 0x23, 0x3e, 0xe4, 0x51, 0x62, 0xf3, 0x24, 0x62, 0xce, 0x23, 0xc6, 0x2d, + 0xc7, 0xe2, 0x56, 0x6d, 0x4b, 0xac, 0x61, 0xf8, 0xf7, 0xb2, 0xd4, 0xb8, 0x98, 0x00, 0x2e, 0x46, + 0xeb, 0xff, 0xd9, 0x26, 0x1a, 0x56, 0x5e, 0x7a, 0x97, 0xac, 0xa2, 0x64, 0x4f, 0xd4, 0xcc, 0x58, + 0x9d, 0x96, 0x4d, 0x71, 0xaa, 0x4b, 0x30, 0x94, 0x0d, 0xfa, 0x19, 0xb9, 0x12, 0xe6, 0x2f, 0xa4, + 0x74, 0xf2, 
0x38, 0x6c, 0x67, 0xa9, 0x31, 0xe7, 0x83, 0x39, 0x84, 0x7e, 0x42, 0x36, 0x64, 0x5e, + 0x1f, 0x24, 0x91, 0xc5, 0xdd, 0xc0, 0x57, 0x7b, 0x9f, 0x66, 0xa9, 0x51, 0xf3, 0x40, 0xcd, 0xee, + 0x7f, 0x4a, 0x96, 0x55, 0x87, 0x13, 0x15, 0x3e, 0xe6, 0x41, 0xc4, 0x6a, 0x4d, 0x61, 0x28, 0xb0, + 0xa2, 0xc2, 0x23, 0x05, 0xe4, 0xa3, 0xff, 0xfb, 0x12, 0x59, 0x79, 0x58, 0x34, 0xb2, 0x35, 0x7c, + 0x2f, 0x60, 0xa2, 0x04, 0xc9, 0x52, 0xa1, 0x99, 0x57, 0x44, 0x65, 0x2c, 0xe3, 0x50, 0xb1, 0xe8, + 0x3e, 0xa1, 0xa5, 0x6c, 0x3c, 0xb2, 0x38, 0x6a, 0x65, 0x02, 0xde, 0xc8, 0x52, 0xa3, 0xc1, 0x0b, + 0x0d, 0x58, 0x3e, 0xbb, 0x89, 0x76, 0xac, 0x52, 0x50, 0xcc, 0xae, 0x70, 0xa8, 0x58, 0x22, 0x75, + 0xc5, 0xe1, 0x1d, 0x32, 0x9f, 0xab, 0xb3, 0x8e, 0xa9, 0xab, 0x7a, 0xa0, 0x66, 0x17, 0xf9, 0xd2, + 0x2e, 0x9c, 0xaf, 0xff, 0x2e, 0x11, 0x0d, 0xfd, 0xf9, 0xc4, 0xea, 0xa3, 0xb2, 0x23, 0xb5, 0x57, + 0x8a, 0x89, 0x73, 0x0f, 0xd4, 0x6c, 0xfa, 0x05, 0xb9, 0x56, 0x42, 0x1e, 0x04, 0x3f, 0xf8, 0x5e, + 0x60, 0x39, 0x79, 0xd6, 0x6e, 0x64, 0xa9, 0xd1, 0x4c, 0x80, 0x66, 0x58, 0x7c, 0x03, 0xbb, 0x82, + 0x61, 0x29, 0xea, 0x14, 0xdf, 0x60, 0xde, 0x0b, 0x0d, 0x18, 0xb5, 0xc9, 0x0d, 0x51, 0x77, 0x4e, + 0x81, 0x1d, 0xb1, 0x88, 0xf9, 0x36, 0x73, 0x8a, 0xa3, 0xd3, 0x5b, 0xdf, 0x69, 0xdf, 0x59, 0x31, + 0x6f, 0x67, 0xa9, 0xf1, 0xd6, 0x42, 0xd2, 0xec, 0x7c, 0xc1, 0xe2, 0x38, 0xc5, 0xdd, 0xa5, 0x76, + 0x33, 0x10, 0xd8, 0x82, 0xbb, 0xcb, 0xec, 0xfd, 0x80, 0x1d, 0xc5, 0xfb, 0x8c, 0xdb, 0xe3, 0xbc, + 0x2a, 0x97, 0xdf, 0xaf, 0xe2, 0x85, 0x06, 0x8c, 0x7e, 0x4b, 0x7a, 0x76, 0x80, 0xdb, 0xdd, 0x0d, + 0xfc, 0xbd, 0xc0, 0xe7, 0x51, 0xe0, 0x1d, 0x58, 0x9c, 0xf9, 0xf6, 0x29, 0x16, 0xee, 0x8e, 0x79, + 0x2b, 0x4b, 0x8d, 0x85, 0x1c, 0x58, 0xe8, 0xa1, 0x0e, 0xb9, 0x15, 0xba, 0x21, 0x13, 0x2d, 0xee, + 0x9b, 0xc8, 0x0a, 0x43, 0x16, 0xc9, 0x13, 0xce, 0x1c, 0x59, 0x18, 0x65, 0xa1, 0xdf, 0xc9, 0x52, + 0xe3, 0x5c, 0x1e, 0x9c, 0xeb, 0xed, 0xff, 0xa1, 0x11, 0x0d, 0xf3, 0x24, 0xb6, 0xdf, 0x98, 0x59, + 0x8e, 0x4c, 0x9a, 0x28, 0x66, 0xe5, 0x7d, 0x5f, 0xf5, 0x40, 0xcd, 0xae, 0x68, 0xe5, 0xea, 0xb4, + 0x06, 0xad, 0x5c, 0x4f, 0xcd, 0xa6, 0x7b, 0xe4, 0xaa, 0xc3, 0xec, 0x60, 0x12, 0x46, 0x58, 0x39, + 0xe5, 0xd4, 0x32, 0x75, 0xd7, 0xb2, 0xd4, 0x98, 0x77, 0xc2, 0x3c, 0x54, 0x0f, 0x52, 0xce, 0xd0, + 0x5c, 0x10, 0xb9, 0x8c, 0x79, 0x88, 0xde, 0x27, 0x9b, 0xf5, 0x75, 0xc8, 0x9e, 0xb8, 0x95, 0xa5, + 0x46, 0xdd, 0x05, 0x75, 0x40, 0xc8, 0xf1, 0x2c, 0x3d, 0x48, 0x42, 0xcf, 0xb5, 0x2d, 0x21, 0xbf, + 0x5c, 0xc8, 0x6b, 0x2e, 0xa8, 0x03, 0x42, 0x1e, 0xd6, 0x7a, 0x1f, 0x29, 0xe4, 0x35, 0x17, 0xd4, + 0x01, 0x1a, 0x92, 0x9d, 0x3c, 0xb1, 0x0b, 0xba, 0x93, 0xea, 0xa5, 0xef, 0x64, 0xa9, 0xf1, 0x5a, + 0x2e, 0xbc, 0x96, 0x41, 0x4f, 0xc9, 0xdb, 0xe5, 0x1c, 0x2e, 0x9a, 0x54, 0x76, 0xd8, 0x77, 0xb3, + 0xd4, 0xb8, 0x08, 0x1d, 0x2e, 0x42, 0xea, 0xff, 0xdd, 0x21, 0x1a, 0xde, 0x6a, 0x45, 0x8d, 0x67, + 0xf2, 0x46, 0xb2, 0x1f, 0x24, 0x7e, 0xa5, 0xc3, 0x94, 0x71, 0xa8, 0x58, 0xa2, 0xc1, 0xb2, 0xd9, + 0x3d, 0xe6, 0x38, 0x11, 0xbd, 0x4a, 0x56, 0x4a, 0x4d, 0x36, 0xd8, 0xba, 0x0f, 0xe6, 0x10, 0xfa, + 0x11, 0x59, 0x57, 0x18, 0x16, 0x6f, 0x79, 0xb7, 0xd4, 0xcc, 0xab, 0x59, 0x6a, 0x54, 0x1d, 0x50, + 0x35, 0x85, 0x10, 0x2f, 0xc3, 0xc0, 0x6c, 0xe6, 0x9e, 0xe4, 0x37, 0x49, 0x14, 0x56, 0x1c, 0x50, + 0x35, 0xc5, 0x9d, 0x10, 0x01, 0x6c, 0x49, 0xf2, 0x78, 0xe1, 0x9d, 0x30, 0x07, 0xa1, 0x18, 0x8a, + 0xab, 0x66, 0x24, 0xd7, 0x2a, 0xcf, 0x92, 0x26, 0xaf, 0x9a, 0x33, 0x0c, 0xf2, 0x91, 0x48, 0xa0, + 0x53, 0x2e, 0xf1, 0xcb, 0x45, 0x93, 0x2c, 0xe3, 0x50, 0xb1, 0xc4, 0x79, 0xc3, 0x72, 0x7c, 0xc0, + 0xfc, 0x11, 0x1f, 0x0f, 0x59, 0x74, 
0x92, 0x5f, 0x20, 0xf1, 0xbc, 0xcd, 0x39, 0x61, 0x1e, 0x32, + 0xd9, 0xf3, 0x97, 0x7a, 0xeb, 0xc5, 0x4b, 0xbd, 0xf5, 0xea, 0xa5, 0xde, 0xfe, 0x69, 0xaa, 0xb7, + 0x7f, 0x9b, 0xea, 0xed, 0x67, 0x53, 0xbd, 0xfd, 0x7c, 0xaa, 0xb7, 0xff, 0x99, 0xea, 0xed, 0x7f, + 0xa7, 0x7a, 0xeb, 0xd5, 0x54, 0x6f, 0xff, 0x7c, 0xa6, 0xb7, 0x9e, 0x9f, 0xe9, 0xad, 0x17, 0x67, + 0x7a, 0xeb, 0xbb, 0xc1, 0xc8, 0xe5, 0xe3, 0xe4, 0x70, 0xd7, 0x0e, 0x26, 0x83, 0x51, 0x64, 0x1d, + 0x59, 0xbe, 0x35, 0xf0, 0x82, 0x27, 0xee, 0xe0, 0xe4, 0xde, 0xa0, 0xe9, 0x6f, 0x83, 0xc3, 0x2e, + 0xfe, 0x29, 0x70, 0xef, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x64, 0x90, 0x88, 0xaf, 0x55, 0x10, + 0x00, 0x00, } func (this *Result) Equal(that interface{}) bool { @@ -1115,6 +1126,9 @@ func (this *Index) Equal(that interface{}) bool { if this.PostFilterChunks != that1.PostFilterChunks { return false } + if this.ShardsDuration != that1.ShardsDuration { + return false + } return true } func (this *Querier) Equal(that interface{}) bool { @@ -1371,10 +1385,11 @@ func (this *Index) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&stats.Index{") s = append(s, "TotalChunks: "+fmt.Sprintf("%#v", this.TotalChunks)+",\n") s = append(s, "PostFilterChunks: "+fmt.Sprintf("%#v", this.PostFilterChunks)+",\n") + s = append(s, "ShardsDuration: "+fmt.Sprintf("%#v", this.ShardsDuration)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1743,6 +1758,11 @@ func (m *Index) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ShardsDuration != 0 { + i = encodeVarintStats(dAtA, i, uint64(m.ShardsDuration)) + i-- + dAtA[i] = 0x18 + } if m.PostFilterChunks != 0 { i = encodeVarintStats(dAtA, i, uint64(m.PostFilterChunks)) i-- @@ -2158,6 +2178,9 @@ func (m *Index) Size() (n int) { if m.PostFilterChunks != 0 { n += 1 + sovStats(uint64(m.PostFilterChunks)) } + if m.ShardsDuration != 0 { + n += 1 + sovStats(uint64(m.ShardsDuration)) + } return n } @@ -2361,6 +2384,7 @@ func (this *Index) String() string { s := strings.Join([]string{`&Index{`, `TotalChunks:` + fmt.Sprintf("%v", this.TotalChunks) + `,`, `PostFilterChunks:` + fmt.Sprintf("%v", this.PostFilterChunks) + `,`, + `ShardsDuration:` + fmt.Sprintf("%v", this.ShardsDuration) + `,`, `}`, }, "") return s @@ -3316,6 +3340,25 @@ func (m *Index) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardsDuration", wireType) + } + m.ShardsDuration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShardsDuration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipStats(dAtA[iNdEx:]) diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto index b53747b7941fd..0b48078e9e932 100644 --- a/pkg/logqlmodel/stats/stats.proto +++ b/pkg/logqlmodel/stats/stats.proto @@ -108,6 +108,8 @@ message Index { int64 totalChunks = 1 [(gogoproto.jsontag) = "totalChunks"]; // Post-filtered chunks int64 postFilterChunks = 2 [(gogoproto.jsontag) = "postFilterChunks"]; + // Nanosecond duration spent fetching shards + int64 shardsDuration = 3 [(gogoproto.jsontag) = "shardsDuration"]; } message Querier { diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index b334ab5b06918..273750f71364d 100644 --- a/pkg/querier/queryrange/codec_test.go 
+++ b/pkg/querier/queryrange/codec_test.go @@ -1691,7 +1691,8 @@ var ( }, "index": { "postFilterChunks": 0, - "totalChunks": 0 + "totalChunks": 0, + "shardsDuration": 0 }, "cache": { "chunk": { diff --git a/pkg/querier/queryrange/prometheus_test.go b/pkg/querier/queryrange/prometheus_test.go index 9d47be945b561..2888fbfdd6ace 100644 --- a/pkg/querier/queryrange/prometheus_test.go +++ b/pkg/querier/queryrange/prometheus_test.go @@ -15,7 +15,8 @@ import ( var emptyStats = `"stats": { "index": { "postFilterChunks": 0, - "totalChunks": 0 + "totalChunks": 0, + "shardsDuration": 0 }, "ingester" : { "store": { diff --git a/pkg/storage/stores/index/index.go b/pkg/storage/stores/index/index.go index 26b2a44880047..99e5b35978837 100644 --- a/pkg/storage/stores/index/index.go +++ b/pkg/storage/stores/index/index.go @@ -2,6 +2,7 @@ package index import ( "context" + "time" "github.com/grafana/dskit/instrument" "github.com/prometheus/client_golang/prometheus" @@ -160,7 +161,18 @@ func (m MonitoredReaderWriter) GetShards( var shards *logproto.ShardsResponse if err := loki_instrument.TimeRequest(ctx, "shards", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error { var err error + start := time.Now() shards, err = m.rw.GetShards(ctx, userID, from, through, targetBytesPerShard, predicate) + + if err == nil { + // record duration here from caller to avoid needing to do this in two separate places: + // 1) when we resolve shards from the index alone + // 2) when we resolve shards from the index + blooms + // NB(owen-d): since this is measured by the callee, it does not include time in queue, + // over the wire, etc. + shards.Statistics.Index.ShardsDuration = int64(time.Since(start)) + } + return err }); err != nil { return nil, err diff --git a/pkg/util/marshal/legacy/marshal_test.go b/pkg/util/marshal/legacy/marshal_test.go index b535a390479f9..90fdef6281edc 100644 --- a/pkg/util/marshal/legacy/marshal_test.go +++ b/pkg/util/marshal/legacy/marshal_test.go @@ -58,7 +58,8 @@ var queryTests = []struct { "stats" : { "index": { "postFilterChunks": 0, - "totalChunks": 0 + "totalChunks": 0, + "shardsDuration": 0 }, "ingester" : { "store": { diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go index 1ccbe2158dfc5..c749677f77026 100644 --- a/pkg/util/marshal/marshal_test.go +++ b/pkg/util/marshal/marshal_test.go @@ -27,7 +27,8 @@ import ( const emptyStats = `{ "index": { "postFilterChunks": 0, - "totalChunks": 0 + "totalChunks": 0, + "shardsDuration": 0 }, "ingester" : { "store": { From 006f88cef19d4d1fe14a40287ccdf534f6975475 Mon Sep 17 00:00:00 2001 From: Quentin Bisson Date: Wed, 17 Apr 2024 18:55:46 +0200 Subject: [PATCH 5/8] fix: incorrect compactor matcher in loki-deletion dashboard mixin (#12567) Signed-off-by: QuentinBisson --- .../loki-mixin-compiled-ssd/dashboards/loki-deletion.json | 6 +++--- .../loki-mixin-compiled/dashboards/loki-deletion.json | 2 +- production/loki-mixin/dashboards/loki-deletion.libsonnet | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json index d7748b960885b..e56de2786225a 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json @@ -579,7 +579,7 @@ "span": 6, "targets": [ { - "expr": 
"sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\",job=~\"$namespace/(loki|enterprise-logs)-read\"}[$__rate_interval])) by (user)", + "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{user}}", "legendLink": null @@ -606,7 +606,7 @@ "span": 6, "targets": [ { - "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ", + "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ", "refId": "A" } ], @@ -619,7 +619,7 @@ "span": 6, "targets": [ { - "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"", + "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"", "refId": "A" } ], diff --git a/production/loki-mixin-compiled/dashboards/loki-deletion.json b/production/loki-mixin-compiled/dashboards/loki-deletion.json index 939f37e481a82..2db2b7cb36586 100644 --- a/production/loki-mixin-compiled/dashboards/loki-deletion.json +++ b/production/loki-mixin-compiled/dashboards/loki-deletion.json @@ -579,7 +579,7 @@ "span": 6, "targets": [ { - "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\",job=~\"$namespace/compactor\"}[$__rate_interval])) by (user)", + "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{user}}", "legendLink": null diff --git a/production/loki-mixin/dashboards/loki-deletion.libsonnet b/production/loki-mixin/dashboards/loki-deletion.libsonnet index 5fdbcc769a588..2acd86a8b1fbb 100644 --- a/production/loki-mixin/dashboards/loki-deletion.libsonnet +++ b/production/loki-mixin/dashboards/loki-deletion.libsonnet @@ -2,7 +2,7 @@ local g = import 'grafana-builder/grafana.libsonnet'; local utils = import 'mixin-utils/utils.libsonnet'; (import 'dashboard-utils.libsonnet') { - local compactor_matcher = if $._config.ssd.enabled then '%s-read' % $._config.ssd.pod_prefix_matcher else 'compactor', + local compactor_matcher = if $._config.ssd.enabled then 'container="loki", pod=~"%s-read.*"' % $._config.ssd.pod_prefix_matcher else 'container="compactor"', grafanaDashboards+:: { 'loki-deletion.json': @@ -61,15 +61,15 @@ local utils = import 'mixin-utils/utils.libsonnet'; ) .addPanel( $.newQueryPanel('Lines Deleted / Sec') + - g.queryPanel('sum(rate(loki_compactor_deleted_lines{' + $._config.per_cluster_label + '=~"$cluster",job=~"$namespace/%s"}[$__rate_interval])) by (user)' % compactor_matcher, '{{user}}'), + g.queryPanel('sum(rate(loki_compactor_deleted_lines{' + $.namespaceMatcher() + ', ' + compactor_matcher + '}[$__rate_interval])) by (user)', '{{user}}'), ) ).addRow( g.row('List of 
deletion requests')
      .addPanel(
-        $.logPanel('In progress/finished', '{%s, container="compactor"} |~ "Started processing delete request|delete request for user marked as processed" | logfmt | line_format "{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}" ' % $.namespaceMatcher()),
+        $.logPanel('In progress/finished', '{%s, %s} |~ "Started processing delete request|delete request for user marked as processed" | logfmt | line_format "{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}" ' % [$.namespaceMatcher(), compactor_matcher]),
       )
       .addPanel(
-        $.logPanel('Requests', '{%s, container="compactor"} |~ "delete request for user added" | logfmt | line_format "{{.ts}} user={{.user}} query=\'{{.query}}\'"' % $.namespaceMatcher()),
+        $.logPanel('Requests', '{%s, %s} |~ "delete request for user added" | logfmt | line_format "{{.ts}} user={{.user}} query=\'{{.query}}\'"' % [$.namespaceMatcher(), compactor_matcher]),
       )
     ),
 },

From a8b172bf24f55b0964f51d2f2a4845d9edc8c3b5 Mon Sep 17 00:00:00 2001
From: ASKYA
Date: Wed, 17 Apr 2024 17:21:02 +0000
Subject: [PATCH 6/8] docs: Add Promtail's `file_sd_configs` scrape_configs
 example (#11791)

Co-authored-by: J Stickler
Co-authored-by: Callum Styan
---
 docs/sources/send-data/promtail/scraping.md | 31 +++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/docs/sources/send-data/promtail/scraping.md b/docs/sources/send-data/promtail/scraping.md
index e72fe872e5e30..605bdeb5aaac4 100644
--- a/docs/sources/send-data/promtail/scraping.md
+++ b/docs/sources/send-data/promtail/scraping.md
@@ -128,6 +128,37 @@ There are different types of labels present in Promtail:
   uniqueness of the streams. It is set to the absolute path of the file
   the line was read from.
 
+### Example of File Discovery
+To scrape a set of log files defined manually on a machine, you can use either `static_configs` or `file_sd_configs`. With `static_configs`, Promtail must be reloaded to apply modifications; with `file_sd_configs`, no reload is needed, because Promtail re-reads the discovery files whenever they are updated.
+
+The excerpt of Promtail's configuration below shows a `file_sd_configs` section that is used to scrape the `apt` and `dpkg` logs.
+
+```yaml
+scrape_configs:
+  - job_name: apt-dpkg
+    file_sd_configs:
+      - files:
+          - /etc/promtail/dpkg-apt.yaml
+        refresh_interval: 5m
+```
+The targets to be scraped by Promtail are defined in `/etc/promtail/dpkg-apt.yaml`. Promtail reads the targets to scrape from the list of files provided under `files`.
+
+Below is the content of `/etc/promtail/dpkg-apt.yaml`.
+```yaml
+- targets: ["localhost"]
+  labels:
+    job: dpkg
+    __path__: /var/log/dpkg.log
+- targets: ["localhost"]
+  labels:
+    job: apt
+    __path__: /var/log/apt/*.log
+```
+
+As you can see, `/etc/promtail/dpkg-apt.yaml` contains the list of targets we would otherwise have defined under [static_configs](https://grafana.com/docs/loki/latest/send-data/promtail/configuration/#static_configs).
+It defines two targets. The first one has the label `job` set to `dpkg` and `__path__` pointing at dpkg's log file, `/var/log/dpkg.log`. The second also has two labels: `job`, and again `__path__`, which specifies the path to APT's log files. This `__path__` contains a glob, and every log file matching that glob will be scraped under that target.
+To summarize, the above `/etc/promtail/dpkg-apt.yaml` showcases the YAML format of a file_sd_config discovery file.
The JSON format can be seen [here](https://grafana.com/docs/loki/latest/send-data/promtail/configuration/#file_sd_config), + ### Kubernetes Discovery While Promtail can use the Kubernetes API to discover pods as From 6c33809015bef8078b17dcb6b0701e930132f042 Mon Sep 17 00:00:00 2001 From: Trevor Whitney Date: Wed, 17 Apr 2024 11:24:47 -0600 Subject: [PATCH 7/8] feat: split detected fields queries (#12491) --- cmd/loki/loki-local-config.yaml | 3 + pkg/ingester/ingester.go | 3 +- pkg/logproto/logproto.pb.go | 431 ++++++++++++-------- pkg/logproto/logproto.proto | 4 + pkg/loki/modules.go | 7 +- pkg/lokifrontend/frontend/v2/frontend.go | 8 + pkg/querier/http.go | 3 +- pkg/querier/multi_tenant_querier.go | 3 +- pkg/querier/querier.go | 34 +- pkg/querier/queryrange/codec.go | 106 ++++- pkg/querier/queryrange/codec_test.go | 97 +++++ pkg/querier/queryrange/roundtrip.go | 67 ++- pkg/querier/queryrange/split_by_interval.go | 5 + pkg/querier/queryrange/splitters.go | 14 + pkg/storage/detected/fields.go | 93 +++++ pkg/storage/detected/fields_test.go | 94 +++++ 16 files changed, 781 insertions(+), 191 deletions(-) create mode 100644 pkg/storage/detected/fields.go create mode 100644 pkg/storage/detected/fields_test.go diff --git a/cmd/loki/loki-local-config.yaml b/cmd/loki/loki-local-config.yaml index e448dfd9f1fa3..03b579647753a 100644 --- a/cmd/loki/loki-local-config.yaml +++ b/cmd/loki/loki-local-config.yaml @@ -36,6 +36,9 @@ schema_config: ruler: alertmanager_url: http://localhost:9093 +frontend: + encoding: protobuf + # By default, Loki will send anonymous, but uniquely-identifiable usage and configuration # analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/ # diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index f99c7fe51b978..b0d197623d3c7 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1365,7 +1365,7 @@ func adjustQueryStartTime(maxLookBackPeriod time.Duration, start, now time.Time) return start } -func (i *Ingester) GetDetectedFields(_ context.Context, _ *logproto.DetectedFieldsRequest) (*logproto.DetectedFieldsResponse, error) { +func (i *Ingester) GetDetectedFields(_ context.Context, r *logproto.DetectedFieldsRequest) (*logproto.DetectedFieldsResponse, error) { return &logproto.DetectedFieldsResponse{ Fields: []*logproto.DetectedField{ { @@ -1374,6 +1374,7 @@ func (i *Ingester) GetDetectedFields(_ context.Context, _ *logproto.DetectedFiel Cardinality: 1, }, }, + FieldLimit: r.GetFieldLimit(), }, nil } diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index c478e47e89c6d..300fd59539b94 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -2699,7 +2699,8 @@ func (m *DetectedFieldsRequest) GetStep() int64 { } type DetectedFieldsResponse struct { - Fields []*DetectedField `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` + Fields []*DetectedField `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` + FieldLimit uint32 `protobuf:"varint,2,opt,name=fieldLimit,proto3" json:"fieldLimit,omitempty"` } func (m *DetectedFieldsResponse) Reset() { *m = DetectedFieldsResponse{} } @@ -2741,10 +2742,20 @@ func (m *DetectedFieldsResponse) GetFields() []*DetectedField { return nil } +func (m *DetectedFieldsResponse) GetFieldLimit() uint32 { + if m != nil { + return m.FieldLimit + } + return 0 +} + +// TODO: make the detected field include the serialized sketch +// we only want cardinality in the JSON response type DetectedField struct { Label string 
`protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` Type DetectedFieldType `protobuf:"bytes,2,opt,name=type,proto3,casttype=DetectedFieldType" json:"type,omitempty"` Cardinality uint64 `protobuf:"varint,3,opt,name=cardinality,proto3" json:"cardinality,omitempty"` + Sketch []byte `protobuf:"bytes,4,opt,name=sketch,proto3" json:"-"` } func (m *DetectedField) Reset() { *m = DetectedField{} } @@ -2800,6 +2811,13 @@ func (m *DetectedField) GetCardinality() uint64 { return 0 } +func (m *DetectedField) GetSketch() []byte { + if m != nil { + return m.Sketch + } + return nil +} + type DetectedLabelsRequest struct { Start *time.Time `protobuf:"bytes,1,opt,name=start,proto3,stdtime" json:"start,omitempty"` End *time.Time `protobuf:"bytes,2,opt,name=end,proto3,stdtime" json:"end,omitempty"` @@ -3015,168 +3033,169 @@ func init() { func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } var fileDescriptor_c28a5f14f1f4c79a = []byte{ - // 2569 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x39, 0xcb, 0x6f, 0x1b, 0xc7, - 0xf9, 0x5c, 0x72, 0xf9, 0xfa, 0x48, 0xc9, 0xd2, 0x88, 0x96, 0x09, 0xda, 0x21, 0x95, 0xc1, 0xef, - 0x97, 0xa8, 0xb1, 0x23, 0xc6, 0x72, 0xed, 0x3a, 0x76, 0xdd, 0xd4, 0x94, 0x62, 0x45, 0xb6, 0xfc, - 0xc8, 0x48, 0x71, 0xd2, 0xa2, 0x86, 0xb1, 0x22, 0x47, 0xd4, 0xc2, 0xe4, 0x2e, 0xbd, 0x3b, 0xb4, - 0xcd, 0x5b, 0xff, 0x81, 0xa2, 0x01, 0x7a, 0x68, 0x7b, 0x29, 0x50, 0xa0, 0x40, 0x8b, 0x14, 0xbd, - 0x14, 0x3d, 0x16, 0xed, 0xa5, 0x07, 0xf7, 0xe6, 0xde, 0x82, 0x1c, 0xd8, 0x5a, 0xbe, 0x14, 0x3a, - 0x05, 0xe8, 0x2d, 0xa7, 0x62, 0x1e, 0xfb, 0x14, 0x59, 0x87, 0x8a, 0x83, 0xc0, 0x17, 0x72, 0xe6, - 0x9b, 0x6f, 0xbe, 0x99, 0xef, 0x31, 0xdf, 0x6b, 0xe1, 0x78, 0xef, 0x5e, 0xbb, 0xde, 0xb1, 0xdb, - 0x3d, 0xc7, 0x66, 0xb6, 0x3f, 0x58, 0x12, 0xbf, 0x28, 0xe7, 0xcd, 0x2b, 0xa5, 0xb6, 0xdd, 0xb6, - 0x25, 0x0e, 0x1f, 0xc9, 0xf5, 0x4a, 0xad, 0x6d, 0xdb, 0xed, 0x0e, 0xad, 0x8b, 0xd9, 0x76, 0x7f, - 0xa7, 0xce, 0xcc, 0x2e, 0x75, 0x99, 0xd1, 0xed, 0x29, 0x84, 0x05, 0x45, 0xfd, 0x7e, 0xa7, 0x6b, - 0xb7, 0x68, 0xa7, 0xee, 0x32, 0x83, 0xb9, 0xf2, 0x57, 0x61, 0xcc, 0x71, 0x8c, 0x5e, 0xdf, 0xdd, - 0x15, 0x3f, 0x12, 0x88, 0xff, 0xa4, 0xc1, 0xd1, 0x0d, 0x63, 0x9b, 0x76, 0xb6, 0xec, 0xdb, 0x46, - 0xa7, 0x4f, 0x5d, 0x42, 0xdd, 0x9e, 0x6d, 0xb9, 0x14, 0xad, 0x40, 0xa6, 0xc3, 0x17, 0xdc, 0xb2, - 0xb6, 0x90, 0x5a, 0x2c, 0x2c, 0x9f, 0x5c, 0xf2, 0xaf, 0x3c, 0x72, 0x83, 0x84, 0xba, 0xef, 0x5a, - 0xcc, 0x19, 0x10, 0xb5, 0xb5, 0x72, 0x1b, 0x0a, 0x21, 0x30, 0x9a, 0x81, 0xd4, 0x3d, 0x3a, 0x28, - 0x6b, 0x0b, 0xda, 0x62, 0x9e, 0xf0, 0x21, 0x3a, 0x0d, 0xe9, 0x07, 0x9c, 0x4c, 0x39, 0xb9, 0xa0, - 0x2d, 0x16, 0x96, 0x8f, 0x07, 0x87, 0x7c, 0x60, 0x99, 0xf7, 0xfb, 0x54, 0xec, 0x56, 0x07, 0x49, - 0xcc, 0x0b, 0xc9, 0xf3, 0x1a, 0x3e, 0x09, 0xb3, 0x07, 0xd6, 0xd1, 0x3c, 0x64, 0x04, 0x86, 0xbc, - 0x71, 0x9e, 0xa8, 0x19, 0x2e, 0x01, 0xda, 0x64, 0x0e, 0x35, 0xba, 0xc4, 0x60, 0xfc, 0xbe, 0xf7, - 0xfb, 0xd4, 0x65, 0xf8, 0x3a, 0xcc, 0x45, 0xa0, 0x8a, 0xed, 0x73, 0x50, 0x70, 0x03, 0xb0, 0xe2, - 0xbd, 0x14, 0x5c, 0x2b, 0xd8, 0x43, 0xc2, 0x88, 0xf8, 0x57, 0x1a, 0x40, 0xb0, 0x86, 0xaa, 0x00, - 0x72, 0xf5, 0x3d, 0xc3, 0xdd, 0x15, 0x0c, 0xeb, 0x24, 0x04, 0x41, 0xa7, 0x60, 0x36, 0x98, 0xdd, - 0xb0, 0x37, 0x77, 0x0d, 0xa7, 0x25, 0x64, 0xa0, 0x93, 0x83, 0x0b, 0x08, 0x81, 0xee, 0x18, 0x8c, - 0x96, 0x53, 0x0b, 0xda, 0x62, 0x8a, 0x88, 0x31, 0xe7, 0x96, 0x51, 0xcb, 0xb0, 0x58, 0x59, 0x17, - 0xe2, 0x54, 0x33, 0x0e, 0xe7, 0xfa, 0xa5, 0x6e, 0x39, 0xbd, 
0xa0, 0x2d, 0x4e, 0x11, 0x35, 0xc3, - 0x9f, 0xa4, 0xa0, 0xf8, 0x7e, 0x9f, 0x3a, 0x03, 0x25, 0x00, 0x54, 0x85, 0x9c, 0x4b, 0x3b, 0xb4, - 0xc9, 0x6c, 0x47, 0x6a, 0xa4, 0x91, 0x2c, 0x6b, 0xc4, 0x87, 0xa1, 0x12, 0xa4, 0x3b, 0x66, 0xd7, - 0x64, 0xe2, 0x5a, 0x53, 0x44, 0x4e, 0xd0, 0x05, 0x48, 0xbb, 0xcc, 0x70, 0x98, 0xb8, 0x4b, 0x61, - 0xb9, 0xb2, 0x24, 0x0d, 0x73, 0xc9, 0x33, 0xcc, 0xa5, 0x2d, 0xcf, 0x30, 0x1b, 0xb9, 0xc7, 0xc3, - 0x5a, 0xe2, 0xe3, 0x7f, 0xd6, 0x34, 0x22, 0xb7, 0xa0, 0x73, 0x90, 0xa2, 0x56, 0x4b, 0xdc, 0xf7, - 0xcb, 0xee, 0xe4, 0x1b, 0xd0, 0x69, 0xc8, 0xb7, 0x4c, 0x87, 0x36, 0x99, 0x69, 0x5b, 0x82, 0xab, - 0xe9, 0xe5, 0xb9, 0x40, 0x23, 0xab, 0xde, 0x12, 0x09, 0xb0, 0xd0, 0x29, 0xc8, 0xb8, 0x5c, 0x74, - 0x6e, 0x39, 0xcb, 0x6d, 0xa1, 0x51, 0xda, 0x1f, 0xd6, 0x66, 0x24, 0xe4, 0x94, 0xdd, 0x35, 0x19, - 0xed, 0xf6, 0xd8, 0x80, 0x28, 0x1c, 0xf4, 0x06, 0x64, 0x5b, 0xb4, 0x43, 0xb9, 0xc2, 0x73, 0x42, - 0xe1, 0x33, 0x21, 0xf2, 0x62, 0x81, 0x78, 0x08, 0xe8, 0x0e, 0xe8, 0xbd, 0x8e, 0x61, 0x95, 0xf3, - 0x82, 0x8b, 0xe9, 0x00, 0xf1, 0x56, 0xc7, 0xb0, 0x1a, 0x6f, 0x7f, 0x36, 0xac, 0x9d, 0x6d, 0x9b, - 0x6c, 0xb7, 0xbf, 0xbd, 0xd4, 0xb4, 0xbb, 0xf5, 0xb6, 0x63, 0xec, 0x18, 0x96, 0x51, 0xef, 0xd8, - 0xf7, 0xcc, 0xfa, 0x83, 0x33, 0x75, 0xfe, 0x06, 0xef, 0xf7, 0xa9, 0x63, 0x52, 0xa7, 0xce, 0xc9, - 0x2c, 0x09, 0x95, 0xf0, 0xad, 0x44, 0x90, 0xbd, 0xaa, 0xe7, 0x32, 0x33, 0x59, 0xfc, 0x34, 0x09, - 0x68, 0xd3, 0xe8, 0xf6, 0x3a, 0x74, 0x22, 0x95, 0xf9, 0xca, 0x49, 0x1e, 0x5a, 0x39, 0xa9, 0x49, - 0x95, 0x13, 0x48, 0x5a, 0x9f, 0x4c, 0xd2, 0xe9, 0x2f, 0x2b, 0xe9, 0xcc, 0xd7, 0x22, 0x69, 0x5c, - 0x06, 0x9d, 0xcf, 0xb8, 0x53, 0x72, 0x8c, 0x87, 0x42, 0x9e, 0x45, 0xc2, 0x87, 0x78, 0x03, 0x32, - 0xf2, 0x2e, 0xa8, 0x12, 0x17, 0x78, 0xf4, 0x7d, 0x04, 0xc2, 0x4e, 0x79, 0x62, 0x9c, 0x09, 0xc4, - 0x98, 0x12, 0x02, 0xc2, 0x7f, 0xd6, 0x60, 0x4a, 0x69, 0x51, 0xf9, 0x98, 0x6d, 0xc8, 0xca, 0x37, - 0xee, 0xf9, 0x97, 0x63, 0x71, 0xff, 0x72, 0xb9, 0x65, 0xf4, 0x18, 0x75, 0x1a, 0xf5, 0xc7, 0xc3, - 0x9a, 0xf6, 0xd9, 0xb0, 0xf6, 0xfa, 0x38, 0x46, 0x3d, 0x9f, 0xee, 0xf9, 0x25, 0x8f, 0x30, 0x3a, - 0x29, 0x6e, 0xc7, 0x5c, 0x65, 0x0a, 0x47, 0x96, 0x64, 0x28, 0x58, 0xb7, 0xda, 0xd4, 0xe5, 0x94, - 0x75, 0xae, 0x45, 0x22, 0x71, 0x38, 0x9b, 0x0f, 0x0d, 0xc7, 0x32, 0xad, 0xb6, 0x5b, 0x4e, 0x09, - 0xdf, 0xe9, 0xcf, 0xf1, 0x2f, 0x34, 0x98, 0x8b, 0x98, 0xa2, 0x62, 0xe2, 0x3c, 0x64, 0x5c, 0x2e, - 0x5d, 0x8f, 0x87, 0x90, 0x22, 0x37, 0x05, 0xbc, 0x31, 0xad, 0x2e, 0x9f, 0x91, 0x73, 0xa2, 0xf0, - 0x5f, 0xdc, 0xd5, 0xfe, 0xa6, 0x41, 0x51, 0x04, 0x00, 0xef, 0x7d, 0x20, 0xd0, 0x2d, 0xa3, 0x4b, - 0x95, 0xaa, 0xc4, 0x38, 0x14, 0x15, 0xf8, 0x71, 0x39, 0x2f, 0x2a, 0x4c, 0xea, 0xc8, 0xb4, 0x43, - 0x3b, 0x32, 0x2d, 0x78, 0x2b, 0x25, 0x48, 0x73, 0x93, 0x1c, 0x08, 0x27, 0x96, 0x27, 0x72, 0x82, - 0x5f, 0x87, 0x29, 0xc5, 0x85, 0x12, 0xed, 0xb8, 0x40, 0xd6, 0x85, 0x8c, 0xd4, 0x04, 0xfa, 0x3f, - 0xc8, 0xfb, 0x09, 0x80, 0xe0, 0x36, 0xd5, 0xc8, 0xec, 0x0f, 0x6b, 0x49, 0xe6, 0x92, 0x60, 0x01, - 0xd5, 0xc2, 0xc1, 0x55, 0x6b, 0xe4, 0xf7, 0x87, 0x35, 0x09, 0x50, 0xa1, 0x14, 0x9d, 0x00, 0x7d, - 0x97, 0xc7, 0x27, 0x2e, 0x02, 0xbd, 0x91, 0xdb, 0x1f, 0xd6, 0xc4, 0x9c, 0x88, 0x5f, 0xbc, 0x06, - 0xc5, 0x0d, 0xda, 0x36, 0x9a, 0x03, 0x75, 0x68, 0xc9, 0x23, 0xc7, 0x0f, 0xd4, 0x3c, 0x1a, 0xaf, - 0x42, 0xd1, 0x3f, 0xf1, 0x6e, 0xd7, 0x55, 0xaf, 0xa1, 0xe0, 0xc3, 0xae, 0xbb, 0xf8, 0x97, 0x1a, - 0x28, 0x1b, 0x40, 0x38, 0x94, 0x55, 0x70, 0xff, 0x05, 0xfb, 0xc3, 0x9a, 0x82, 0x78, 0x49, 0x03, - 0xba, 0x08, 0x59, 0x57, 0x9c, 0xc8, 0x89, 0xc5, 0x4d, 0x4b, 0x2c, 0x34, 0x8e, 0x70, 
0x13, 0xd9, - 0x1f, 0xd6, 0x3c, 0x44, 0xe2, 0x0d, 0xd0, 0x52, 0x24, 0xf0, 0x4a, 0xc6, 0xa6, 0xf7, 0x87, 0xb5, - 0x10, 0x34, 0x1c, 0x88, 0xf1, 0x17, 0x1a, 0x14, 0xb6, 0x0c, 0xd3, 0x37, 0xa1, 0xb2, 0xa7, 0xa2, - 0xc0, 0xbf, 0x4a, 0x00, 0xb7, 0xc4, 0x16, 0xed, 0x18, 0x83, 0x2b, 0xb6, 0x23, 0xe8, 0x4e, 0x11, - 0x7f, 0x1e, 0xc4, 0x4a, 0x7d, 0x64, 0xac, 0x4c, 0x4f, 0xee, 0x8e, 0xbf, 0x5e, 0xe7, 0x77, 0x55, - 0xcf, 0x25, 0x67, 0x52, 0xf8, 0x0f, 0x1a, 0x14, 0x25, 0xf3, 0xca, 0xf2, 0x7e, 0x04, 0x19, 0x29, - 0x1b, 0xc1, 0xfe, 0xff, 0x70, 0x4c, 0x27, 0x27, 0x71, 0x4a, 0x8a, 0x26, 0x7a, 0x07, 0xa6, 0x5b, - 0x8e, 0xdd, 0xeb, 0xd1, 0xd6, 0xa6, 0x72, 0x7f, 0xc9, 0xb8, 0xfb, 0x5b, 0x0d, 0xaf, 0x93, 0x18, - 0x3a, 0xfe, 0xbb, 0x06, 0x53, 0xca, 0x99, 0x28, 0x75, 0xf9, 0x22, 0xd6, 0x0e, 0x1d, 0xf1, 0x92, - 0x93, 0x46, 0xbc, 0x79, 0xc8, 0xb4, 0x1d, 0xbb, 0xdf, 0xf3, 0x1c, 0x92, 0x9a, 0x4d, 0x16, 0x09, - 0xf1, 0x55, 0x98, 0xf6, 0x58, 0x19, 0xe3, 0x51, 0x2b, 0x71, 0x8f, 0xba, 0xde, 0xa2, 0x16, 0x33, - 0x77, 0x4c, 0xdf, 0x47, 0x2a, 0x7c, 0xfc, 0x53, 0x0d, 0x66, 0xe2, 0x28, 0x68, 0x35, 0x96, 0xc0, - 0xbf, 0x36, 0x9e, 0x5c, 0x38, 0x77, 0xf7, 0x48, 0xab, 0x0c, 0xfe, 0xec, 0xf3, 0x32, 0xf8, 0x52, - 0xd8, 0xc9, 0xe4, 0x95, 0x57, 0xc0, 0x3f, 0xd7, 0x60, 0x2a, 0xa2, 0x4b, 0x74, 0x1e, 0xf4, 0x1d, - 0xc7, 0xee, 0x4e, 0xa4, 0x28, 0xb1, 0x03, 0x7d, 0x1b, 0x92, 0xcc, 0x9e, 0x48, 0x4d, 0x49, 0x66, - 0x73, 0x2d, 0x29, 0xf6, 0x53, 0x32, 0x3f, 0x96, 0x33, 0x7c, 0x16, 0xf2, 0x82, 0xa1, 0x5b, 0x86, - 0xe9, 0x8c, 0x0c, 0x18, 0xa3, 0x19, 0xba, 0x08, 0x47, 0xa4, 0x33, 0x1c, 0xbd, 0xb9, 0x38, 0x6a, - 0x73, 0xd1, 0xdb, 0x7c, 0x1c, 0xd2, 0x2b, 0xbb, 0x7d, 0xeb, 0x1e, 0xdf, 0xd2, 0x32, 0x98, 0xe1, - 0x6d, 0xe1, 0x63, 0x7c, 0x14, 0xe6, 0xf8, 0x1b, 0xa4, 0x8e, 0xbb, 0x62, 0xf7, 0x2d, 0xe6, 0xd5, - 0x27, 0xa7, 0xa0, 0x14, 0x05, 0x2b, 0x2b, 0x29, 0x41, 0xba, 0xc9, 0x01, 0x82, 0xc6, 0x14, 0x91, - 0x13, 0xfc, 0x1b, 0x0d, 0xd0, 0x1a, 0x65, 0xe2, 0x94, 0xf5, 0x55, 0xff, 0x79, 0x54, 0x20, 0xd7, - 0x35, 0x58, 0x73, 0x97, 0x3a, 0xae, 0x97, 0xbf, 0x78, 0xf3, 0x6f, 0x22, 0x59, 0xc4, 0xa7, 0x61, - 0x2e, 0x72, 0x4b, 0xc5, 0x53, 0x05, 0x72, 0x4d, 0x05, 0x53, 0x21, 0xcf, 0x9f, 0xe3, 0x3f, 0x26, - 0x21, 0x27, 0x36, 0x10, 0xba, 0x83, 0x4e, 0x43, 0x61, 0xc7, 0xb4, 0xda, 0xd4, 0xe9, 0x39, 0xa6, - 0x12, 0x81, 0xde, 0x38, 0xb2, 0x3f, 0xac, 0x85, 0xc1, 0x24, 0x3c, 0x41, 0x6f, 0x42, 0xb6, 0xef, - 0x52, 0xe7, 0xae, 0x29, 0x5f, 0x7a, 0xbe, 0x51, 0xda, 0x1b, 0xd6, 0x32, 0x1f, 0xb8, 0xd4, 0x59, - 0x5f, 0xe5, 0xc1, 0xa7, 0x2f, 0x46, 0x44, 0xfe, 0xb7, 0xd0, 0x35, 0x65, 0xa6, 0x22, 0x81, 0x6b, - 0x7c, 0x87, 0x5f, 0x3f, 0xe6, 0xea, 0x7a, 0x8e, 0xdd, 0xa5, 0x6c, 0x97, 0xf6, 0xdd, 0x7a, 0xd3, - 0xee, 0x76, 0x6d, 0xab, 0x2e, 0x2a, 0x6e, 0xc1, 0x34, 0x8f, 0xa0, 0x7c, 0xbb, 0xb2, 0xdc, 0x2d, - 0xc8, 0xb2, 0x5d, 0xc7, 0xee, 0xb7, 0x77, 0x45, 0x60, 0x48, 0x35, 0x2e, 0x4c, 0x4e, 0xcf, 0xa3, - 0x40, 0xbc, 0x01, 0x7a, 0x95, 0x4b, 0x8b, 0x36, 0xef, 0xb9, 0xfd, 0xae, 0xac, 0xf1, 0x1a, 0xe9, - 0xfd, 0x61, 0x4d, 0x7b, 0x93, 0xf8, 0x60, 0xfc, 0x93, 0x24, 0xd4, 0x42, 0xa5, 0xf1, 0x15, 0xdb, - 0xb9, 0x4e, 0x99, 0x63, 0x36, 0x6f, 0x18, 0x5d, 0xea, 0xd9, 0x46, 0x0d, 0x0a, 0x5d, 0x01, 0xbc, - 0x1b, 0x7a, 0x02, 0xd0, 0xf5, 0xf1, 0xd0, 0x2b, 0x00, 0xe2, 0xcd, 0xc8, 0x75, 0xf9, 0x1a, 0xf2, - 0x02, 0x22, 0x96, 0x57, 0x22, 0x92, 0xaa, 0x4f, 0xc8, 0x99, 0x92, 0xd0, 0x7a, 0x5c, 0x42, 0x13, - 0xd3, 0xf1, 0xc5, 0x12, 0xb6, 0xf5, 0x74, 0xd4, 0xd6, 0xf1, 0x3f, 0x34, 0xa8, 0x6e, 0x78, 0x37, - 0x3f, 0xa4, 0x38, 0x3c, 0x7e, 0x93, 0x2f, 0x88, 0xdf, 0xd4, 0x57, 0xe3, 0x17, 0x57, 0x01, 0x36, - 0x4c, 0x8b, 
0x5e, 0x31, 0x3b, 0x8c, 0x3a, 0x23, 0xaa, 0x98, 0x9f, 0xa5, 0x02, 0x97, 0x40, 0xe8, - 0x8e, 0xc7, 0xe7, 0x4a, 0xc8, 0x0f, 0xbf, 0x08, 0x36, 0x92, 0x2f, 0x50, 0x6d, 0xa9, 0x98, 0x8b, - 0xb2, 0x20, 0xbb, 0x23, 0xd8, 0x93, 0x21, 0x35, 0xd2, 0x88, 0x09, 0x78, 0x6f, 0x7c, 0x4f, 0x1d, - 0x7e, 0xee, 0x39, 0x19, 0x91, 0x68, 0x8f, 0xd5, 0xdd, 0x81, 0xc5, 0x8c, 0x47, 0xa1, 0xfd, 0xc4, - 0x3b, 0x04, 0x19, 0x2a, 0xe9, 0x4a, 0x8f, 0x4c, 0xba, 0x2e, 0xa9, 0x63, 0xbe, 0x52, 0xd5, 0x79, - 0x29, 0xf0, 0x80, 0x42, 0x29, 0xca, 0x03, 0xbe, 0x06, 0xba, 0x43, 0x77, 0xbc, 0x50, 0x8d, 0x82, - 0x93, 0x7d, 0x4c, 0xb1, 0x8e, 0xff, 0xa2, 0xc1, 0xcc, 0x1a, 0x65, 0xd1, 0x24, 0xe8, 0x25, 0x52, - 0x29, 0x7e, 0x0f, 0x66, 0x43, 0xf7, 0x57, 0xdc, 0x9f, 0x89, 0x65, 0x3e, 0x47, 0x03, 0xfe, 0xd7, - 0xad, 0x16, 0x7d, 0xa4, 0x0a, 0xca, 0x68, 0xd2, 0x73, 0x0b, 0x0a, 0xa1, 0x45, 0x74, 0x39, 0x96, - 0xee, 0xcc, 0xc5, 0xfa, 0x95, 0x3c, 0x64, 0x37, 0x4a, 0x8a, 0x27, 0x59, 0x36, 0xaa, 0x64, 0xd6, - 0x4f, 0x0d, 0x36, 0x01, 0x09, 0x75, 0x09, 0xb2, 0xe1, 0xe0, 0x24, 0xa0, 0xd7, 0xfc, 0xbc, 0xc7, - 0x9f, 0xa3, 0x57, 0x41, 0x77, 0xec, 0x87, 0x5e, 0x1e, 0x3b, 0x15, 0x1c, 0x49, 0xec, 0x87, 0x44, - 0x2c, 0xe1, 0x8b, 0x90, 0x22, 0xf6, 0x43, 0x54, 0x05, 0x70, 0x0c, 0xab, 0x4d, 0x6f, 0xfb, 0x15, - 0x54, 0x91, 0x84, 0x20, 0x63, 0x12, 0x87, 0x15, 0x98, 0x0d, 0xdf, 0x48, 0xaa, 0x7b, 0x09, 0xb2, - 0xef, 0xf7, 0xc3, 0xe2, 0x2a, 0xc5, 0xc4, 0x25, 0x0b, 0x75, 0x0f, 0x89, 0xdb, 0x0c, 0x04, 0x70, - 0x74, 0x02, 0xf2, 0xcc, 0xd8, 0xee, 0xd0, 0x1b, 0x81, 0x9b, 0x0b, 0x00, 0x7c, 0x95, 0x17, 0x7f, - 0xb7, 0x43, 0x19, 0x50, 0x00, 0x40, 0x6f, 0xc0, 0x4c, 0x70, 0xe7, 0x5b, 0x0e, 0xdd, 0x31, 0x1f, - 0x09, 0x0d, 0x17, 0xc9, 0x01, 0x38, 0x5a, 0x84, 0x23, 0x01, 0x6c, 0x53, 0x64, 0x1a, 0xba, 0x40, - 0x8d, 0x83, 0xb9, 0x6c, 0x04, 0xbb, 0xef, 0xde, 0xef, 0x1b, 0x1d, 0xf1, 0xf8, 0x8a, 0x24, 0x04, - 0xc1, 0x7f, 0xd5, 0x60, 0x56, 0xaa, 0x9a, 0x19, 0xec, 0xa5, 0xb4, 0xfa, 0xdf, 0x6a, 0x80, 0xc2, - 0x1c, 0x28, 0xd3, 0xfa, 0xff, 0x70, 0x23, 0x88, 0xa7, 0x32, 0x05, 0x51, 0xd3, 0x4a, 0x50, 0xd0, - 0xcb, 0xc1, 0x90, 0x11, 0xe9, 0x90, 0x2c, 0xae, 0x75, 0x59, 0x34, 0x4b, 0x08, 0x51, 0xff, 0xbc, - 0xd6, 0xdf, 0x1e, 0x30, 0xea, 0xaa, 0x92, 0x57, 0xd4, 0xfa, 0x02, 0x40, 0xe4, 0x1f, 0x3f, 0x8b, - 0x5a, 0x4c, 0x58, 0x8d, 0x1e, 0x9c, 0xa5, 0x40, 0xc4, 0x1b, 0xe0, 0xdf, 0x27, 0x61, 0xea, 0xb6, - 0xdd, 0xe9, 0x07, 0x81, 0xf1, 0x65, 0x0a, 0x18, 0x91, 0x3a, 0x3c, 0xed, 0xd5, 0xe1, 0x08, 0x74, - 0x97, 0xd1, 0x9e, 0xb0, 0xac, 0x14, 0x11, 0x63, 0x84, 0xa1, 0xc8, 0x0c, 0xa7, 0x4d, 0x99, 0xac, - 0x6e, 0xca, 0x19, 0x91, 0x76, 0x46, 0x60, 0x68, 0x01, 0x0a, 0x46, 0xbb, 0xed, 0xd0, 0xb6, 0xc1, - 0x68, 0x63, 0x50, 0xce, 0x8a, 0xc3, 0xc2, 0x20, 0xfc, 0x11, 0x4c, 0x7b, 0xc2, 0x52, 0x2a, 0x7d, - 0x0b, 0xb2, 0x0f, 0x04, 0x64, 0x44, 0x5f, 0x4c, 0xa2, 0x2a, 0x37, 0xe6, 0xa1, 0x45, 0xfb, 0xec, - 0xde, 0x9d, 0xf1, 0x55, 0xc8, 0x48, 0x74, 0x74, 0x22, 0x5c, 0xa3, 0xc8, 0x26, 0x0d, 0x9f, 0xab, - 0x82, 0x03, 0x43, 0x46, 0x12, 0x52, 0x8a, 0x17, 0xb6, 0x21, 0x21, 0x44, 0xfd, 0xe3, 0xff, 0x68, - 0x70, 0x74, 0x95, 0x32, 0xda, 0x64, 0xb4, 0x75, 0xc5, 0xa4, 0x9d, 0xd6, 0x37, 0x5a, 0x3e, 0xfb, - 0x4d, 0xb0, 0x54, 0xa8, 0x09, 0xc6, 0xfd, 0x4e, 0xc7, 0xb4, 0xe8, 0x46, 0xa8, 0x8b, 0x12, 0x00, - 0xb8, 0x87, 0xd8, 0xe1, 0x17, 0x97, 0xcb, 0xf2, 0xc3, 0x46, 0x08, 0xe2, 0x6b, 0x38, 0x13, 0x68, - 0x18, 0xaf, 0xc3, 0x7c, 0x9c, 0x69, 0xa5, 0xa3, 0x3a, 0x64, 0xc4, 0xde, 0x11, 0xed, 0xd7, 0xc8, - 0x0e, 0xa2, 0xd0, 0xb0, 0x03, 0x53, 0x91, 0x05, 0xa1, 0x33, 0x6e, 0x23, 0xca, 0x7f, 0xca, 0x09, - 0xfa, 0x16, 0xe8, 0x6c, 0xd0, 0x53, 
0x6e, 0xb3, 0x71, 0xf4, 0x8b, 0x61, 0x6d, 0x36, 0xb2, 0x6d, - 0x6b, 0xd0, 0xa3, 0x44, 0xa0, 0x70, 0xd3, 0x6a, 0x1a, 0x4e, 0xcb, 0xb4, 0x8c, 0x8e, 0xc9, 0xa4, - 0x28, 0x74, 0x12, 0x06, 0xe1, 0x5f, 0x87, 0x94, 0x26, 0xed, 0xf1, 0x90, 0x4a, 0xd3, 0x0e, 0xad, - 0x34, 0xed, 0x39, 0x4a, 0xc3, 0x3f, 0x08, 0x44, 0xec, 0x5d, 0x51, 0x89, 0xf8, 0x1d, 0x98, 0x6e, - 0x45, 0x56, 0xc6, 0x8b, 0x5a, 0xf6, 0x3e, 0x63, 0xe8, 0x78, 0x2d, 0x10, 0xb9, 0x80, 0x8c, 0x11, - 0x79, 0x4c, 0x8e, 0xc9, 0x03, 0x72, 0x7c, 0xe3, 0x35, 0xc8, 0xfb, 0x5f, 0x88, 0x50, 0x01, 0xb2, - 0x57, 0x6e, 0x92, 0x0f, 0x2f, 0x93, 0xd5, 0x99, 0x04, 0x2a, 0x42, 0xae, 0x71, 0x79, 0xe5, 0x9a, - 0x98, 0x69, 0xcb, 0x9f, 0x64, 0xbc, 0xb0, 0xea, 0xa0, 0xef, 0x42, 0x5a, 0xc6, 0xca, 0xf9, 0xe0, - 0xba, 0xe1, 0x0f, 0x31, 0x95, 0x63, 0x07, 0xe0, 0x92, 0x6f, 0x9c, 0x78, 0x4b, 0x43, 0x37, 0xa0, - 0x20, 0x80, 0xaa, 0x6d, 0x7a, 0x22, 0xde, 0xbd, 0x8c, 0x50, 0x7a, 0x65, 0xcc, 0x6a, 0x88, 0xde, - 0x05, 0x48, 0x4b, 0x11, 0xcc, 0xc7, 0x52, 0x9a, 0x11, 0xb7, 0x89, 0x34, 0x92, 0x71, 0x02, 0xbd, - 0x0d, 0xfa, 0x96, 0x61, 0x76, 0x50, 0x28, 0xa3, 0x0a, 0x75, 0x3b, 0x2b, 0xf3, 0x71, 0x70, 0xe8, - 0xd8, 0x4b, 0x7e, 0xd3, 0xf6, 0x58, 0xbc, 0x73, 0xe4, 0x6d, 0x2f, 0x1f, 0x5c, 0xf0, 0x4f, 0xbe, - 0x29, 0x5b, 0x8b, 0x5e, 0xff, 0x02, 0xbd, 0x12, 0x3d, 0x2a, 0xd6, 0xee, 0xa8, 0x54, 0xc7, 0x2d, - 0xfb, 0x04, 0x37, 0xa0, 0x10, 0xea, 0x1d, 0x84, 0xc5, 0x7a, 0xb0, 0xf1, 0x11, 0x16, 0xeb, 0x88, - 0x86, 0x03, 0x4e, 0xa0, 0x35, 0xc8, 0xf1, 0x3c, 0x54, 0x7c, 0x63, 0x38, 0x1e, 0x4f, 0x37, 0x43, - 0x69, 0x46, 0xe5, 0xc4, 0xe8, 0x45, 0x9f, 0xd0, 0xf7, 0x21, 0xbf, 0x46, 0x99, 0xf2, 0xd5, 0xc7, - 0xe2, 0xce, 0x7e, 0x84, 0xa4, 0xa2, 0x01, 0x03, 0x27, 0xd0, 0x47, 0x22, 0x25, 0x8e, 0xfa, 0x2a, - 0x54, 0x1b, 0xe3, 0x93, 0xfc, 0x7b, 0x2d, 0x8c, 0x47, 0xf0, 0x29, 0x7f, 0x18, 0xa1, 0xac, 0xa2, - 0x5a, 0x6d, 0xcc, 0x13, 0xf4, 0x29, 0xd7, 0x9e, 0xf3, 0xa5, 0x1f, 0x27, 0x96, 0xef, 0x78, 0x1f, - 0xbb, 0x57, 0x0d, 0x66, 0xa0, 0x9b, 0x30, 0x2d, 0x64, 0xe9, 0x7f, 0x0d, 0x8f, 0xd8, 0xfc, 0x81, - 0x4f, 0xef, 0x11, 0x9b, 0x3f, 0xf8, 0x09, 0x1e, 0x27, 0x1a, 0x77, 0x9e, 0x3c, 0xad, 0x26, 0x3e, - 0x7d, 0x5a, 0x4d, 0x7c, 0xfe, 0xb4, 0xaa, 0xfd, 0x78, 0xaf, 0xaa, 0xfd, 0x6e, 0xaf, 0xaa, 0x3d, - 0xde, 0xab, 0x6a, 0x4f, 0xf6, 0xaa, 0xda, 0xbf, 0xf6, 0xaa, 0xda, 0xbf, 0xf7, 0xaa, 0x89, 0xcf, - 0xf7, 0xaa, 0xda, 0xc7, 0xcf, 0xaa, 0x89, 0x27, 0xcf, 0xaa, 0x89, 0x4f, 0x9f, 0x55, 0x13, 0x3f, - 0x7c, 0xfd, 0xf9, 0xe5, 0x9f, 0x74, 0x74, 0x19, 0xf1, 0x77, 0xe6, 0xbf, 0x01, 0x00, 0x00, 0xff, - 0xff, 0x78, 0xeb, 0x77, 0xf7, 0x92, 0x21, 0x00, 0x00, + // 2590 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7, + 0x95, 0x4b, 0x2e, 0xbf, 0x1e, 0x29, 0x59, 0x1e, 0xd1, 0x36, 0x41, 0xdb, 0xa4, 0x32, 0x68, 0x13, + 0x35, 0x76, 0xc4, 0x58, 0x69, 0xd2, 0xc4, 0x69, 0x9a, 0x9a, 0x52, 0xac, 0xc8, 0x51, 0x1c, 0x67, + 0xa4, 0x38, 0x69, 0xd1, 0x20, 0x58, 0x91, 0x23, 0x6a, 0x21, 0x72, 0x97, 0xde, 0x1d, 0xc6, 0xe1, + 0xad, 0x7f, 0xa0, 0x68, 0x8a, 0x1e, 0xda, 0x5e, 0x0a, 0x14, 0x28, 0xd0, 0x22, 0x45, 0x2f, 0x45, + 0x8f, 0x45, 0x7b, 0xe9, 0x21, 0xbd, 0xa5, 0xb7, 0x20, 0x07, 0xb6, 0x56, 0x2e, 0x85, 0x4e, 0x01, + 0x7a, 0xcb, 0xa9, 0x98, 0xaf, 0xfd, 0x12, 0x59, 0x87, 0x8a, 0x83, 0xc0, 0x17, 0x71, 0xe6, 0xcd, + 0x9b, 0x37, 0xf3, 0x3e, 0xe6, 0x7d, 0xad, 0xe0, 0xfc, 0xe0, 0xa0, 0xdb, 0xec, 0xb9, 0xdd, 0x81, + 0xe7, 0x32, 0x37, 0x18, 0xac, 0x88, 0xbf, 0xa8, 0xa0, 0xe7, 0xb5, 0x4a, 0xd7, 0xed, 0xba, 0x12, + 0x87, 0x8f, 0xe4, 0x7a, 0xad, 0xd1, 0x75, 0xdd, 0x6e, 
0x8f, 0x36, 0xc5, 0x6c, 0x77, 0xb8, 0xd7, + 0x64, 0x76, 0x9f, 0xfa, 0xcc, 0xea, 0x0f, 0x14, 0xc2, 0x92, 0xa2, 0x7e, 0xa7, 0xd7, 0x77, 0x3b, + 0xb4, 0xd7, 0xf4, 0x99, 0xc5, 0x7c, 0xf9, 0x57, 0x61, 0x2c, 0x72, 0x8c, 0xc1, 0xd0, 0xdf, 0x17, + 0x7f, 0x24, 0x10, 0xff, 0xd9, 0x80, 0x33, 0x5b, 0xd6, 0x2e, 0xed, 0xed, 0xb8, 0xb7, 0xad, 0xde, + 0x90, 0xfa, 0x84, 0xfa, 0x03, 0xd7, 0xf1, 0x29, 0x5a, 0x83, 0x5c, 0x8f, 0x2f, 0xf8, 0x55, 0x63, + 0x29, 0xb3, 0x5c, 0x5a, 0xbd, 0xb4, 0x12, 0x5c, 0x79, 0xe2, 0x06, 0x09, 0xf5, 0x5f, 0x72, 0x98, + 0x37, 0x22, 0x6a, 0x6b, 0xed, 0x36, 0x94, 0x22, 0x60, 0xb4, 0x00, 0x99, 0x03, 0x3a, 0xaa, 0x1a, + 0x4b, 0xc6, 0x72, 0x91, 0xf0, 0x21, 0xba, 0x02, 0xd9, 0x77, 0x39, 0x99, 0x6a, 0x7a, 0xc9, 0x58, + 0x2e, 0xad, 0x9e, 0x0f, 0x0f, 0x79, 0xc3, 0xb1, 0xef, 0x0c, 0xa9, 0xd8, 0xad, 0x0e, 0x92, 0x98, + 0x57, 0xd3, 0xcf, 0x1a, 0xf8, 0x12, 0x9c, 0x3e, 0xb6, 0x8e, 0xce, 0x42, 0x4e, 0x60, 0xc8, 0x1b, + 0x17, 0x89, 0x9a, 0xe1, 0x0a, 0xa0, 0x6d, 0xe6, 0x51, 0xab, 0x4f, 0x2c, 0xc6, 0xef, 0x7b, 0x67, + 0x48, 0x7d, 0x86, 0x5f, 0x85, 0xc5, 0x18, 0x54, 0xb1, 0xfd, 0x0c, 0x94, 0xfc, 0x10, 0xac, 0x78, + 0xaf, 0x84, 0xd7, 0x0a, 0xf7, 0x90, 0x28, 0x22, 0xfe, 0xb5, 0x01, 0x10, 0xae, 0xa1, 0x3a, 0x80, + 0x5c, 0x7d, 0xd9, 0xf2, 0xf7, 0x05, 0xc3, 0x26, 0x89, 0x40, 0xd0, 0x65, 0x38, 0x1d, 0xce, 0x6e, + 0xba, 0xdb, 0xfb, 0x96, 0xd7, 0x11, 0x32, 0x30, 0xc9, 0xf1, 0x05, 0x84, 0xc0, 0xf4, 0x2c, 0x46, + 0xab, 0x99, 0x25, 0x63, 0x39, 0x43, 0xc4, 0x98, 0x73, 0xcb, 0xa8, 0x63, 0x39, 0xac, 0x6a, 0x0a, + 0x71, 0xaa, 0x19, 0x87, 0x73, 0xfd, 0x52, 0xbf, 0x9a, 0x5d, 0x32, 0x96, 0xe7, 0x88, 0x9a, 0xe1, + 0x0f, 0x32, 0x50, 0x7e, 0x7d, 0x48, 0xbd, 0x91, 0x12, 0x00, 0xaa, 0x43, 0xc1, 0xa7, 0x3d, 0xda, + 0x66, 0xae, 0x27, 0x35, 0xd2, 0x4a, 0x57, 0x0d, 0x12, 0xc0, 0x50, 0x05, 0xb2, 0x3d, 0xbb, 0x6f, + 0x33, 0x71, 0xad, 0x39, 0x22, 0x27, 0xe8, 0x2a, 0x64, 0x7d, 0x66, 0x79, 0x4c, 0xdc, 0xa5, 0xb4, + 0x5a, 0x5b, 0x91, 0x86, 0xb9, 0xa2, 0x0d, 0x73, 0x65, 0x47, 0x1b, 0x66, 0xab, 0xf0, 0xe1, 0xb8, + 0x91, 0x7a, 0xff, 0x5f, 0x0d, 0x83, 0xc8, 0x2d, 0xe8, 0x19, 0xc8, 0x50, 0xa7, 0x23, 0xee, 0xfb, + 0x45, 0x77, 0xf2, 0x0d, 0xe8, 0x0a, 0x14, 0x3b, 0xb6, 0x47, 0xdb, 0xcc, 0x76, 0x1d, 0xc1, 0xd5, + 0xfc, 0xea, 0x62, 0xa8, 0x91, 0x75, 0xbd, 0x44, 0x42, 0x2c, 0x74, 0x19, 0x72, 0x3e, 0x17, 0x9d, + 0x5f, 0xcd, 0x73, 0x5b, 0x68, 0x55, 0x8e, 0xc6, 0x8d, 0x05, 0x09, 0xb9, 0xec, 0xf6, 0x6d, 0x46, + 0xfb, 0x03, 0x36, 0x22, 0x0a, 0x07, 0x3d, 0x0e, 0xf9, 0x0e, 0xed, 0x51, 0xae, 0xf0, 0x82, 0x50, + 0xf8, 0x42, 0x84, 0xbc, 0x58, 0x20, 0x1a, 0x01, 0xbd, 0x0d, 0xe6, 0xa0, 0x67, 0x39, 0xd5, 0xa2, + 0xe0, 0x62, 0x3e, 0x44, 0xbc, 0xd5, 0xb3, 0x9c, 0xd6, 0x73, 0x9f, 0x8c, 0x1b, 0x4f, 0x77, 0x6d, + 0xb6, 0x3f, 0xdc, 0x5d, 0x69, 0xbb, 0xfd, 0x66, 0xd7, 0xb3, 0xf6, 0x2c, 0xc7, 0x6a, 0xf6, 0xdc, + 0x03, 0xbb, 0xf9, 0xee, 0x53, 0x4d, 0xfe, 0x06, 0xef, 0x0c, 0xa9, 0x67, 0x53, 0xaf, 0xc9, 0xc9, + 0xac, 0x08, 0x95, 0xf0, 0xad, 0x44, 0x90, 0xbd, 0x61, 0x16, 0x72, 0x0b, 0x79, 0x7c, 0x2f, 0x0d, + 0x68, 0xdb, 0xea, 0x0f, 0x7a, 0x74, 0x26, 0x95, 0x05, 0xca, 0x49, 0x9f, 0x58, 0x39, 0x99, 0x59, + 0x95, 0x13, 0x4a, 0xda, 0x9c, 0x4d, 0xd2, 0xd9, 0x2f, 0x2a, 0xe9, 0xdc, 0x57, 0x22, 0x69, 0x5c, + 0x05, 0x93, 0xcf, 0xb8, 0x53, 0xf2, 0xac, 0xbb, 0x42, 0x9e, 0x65, 0xc2, 0x87, 0x78, 0x0b, 0x72, + 0xf2, 0x2e, 0xa8, 0x96, 0x14, 0x78, 0xfc, 0x7d, 0x84, 0xc2, 0xce, 0x68, 0x31, 0x2e, 0x84, 0x62, + 0xcc, 0x08, 0x01, 0xe1, 0xbf, 0x18, 0x30, 0xa7, 0xb4, 0xa8, 0x7c, 0xcc, 0x2e, 0xe4, 0xe5, 0x1b, + 0xd7, 0xfe, 0xe5, 0x5c, 0xd2, 0xbf, 0x5c, 0xeb, 0x58, 0x03, 0x46, 0xbd, 0x56, 
0xf3, 0xc3, 0x71, + 0xc3, 0xf8, 0x64, 0xdc, 0x78, 0x6c, 0x1a, 0xa3, 0xda, 0xa7, 0x6b, 0xbf, 0xa4, 0x09, 0xa3, 0x4b, + 0xe2, 0x76, 0xcc, 0x57, 0xa6, 0x70, 0x6a, 0x45, 0x86, 0x82, 0x4d, 0xa7, 0x4b, 0x7d, 0x4e, 0xd9, + 0xe4, 0x5a, 0x24, 0x12, 0x87, 0xb3, 0x79, 0xd7, 0xf2, 0x1c, 0xdb, 0xe9, 0xfa, 0xd5, 0x8c, 0xf0, + 0x9d, 0xc1, 0x1c, 0xff, 0xd2, 0x80, 0xc5, 0x98, 0x29, 0x2a, 0x26, 0x9e, 0x85, 0x9c, 0xcf, 0xa5, + 0xab, 0x79, 0x88, 0x28, 0x72, 0x5b, 0xc0, 0x5b, 0xf3, 0xea, 0xf2, 0x39, 0x39, 0x27, 0x0a, 0xff, + 0xc1, 0x5d, 0xed, 0xef, 0x06, 0x94, 0x45, 0x00, 0xd0, 0xef, 0x03, 0x81, 0xe9, 0x58, 0x7d, 0xaa, + 0x54, 0x25, 0xc6, 0x91, 0xa8, 0xc0, 0x8f, 0x2b, 0xe8, 0xa8, 0x30, 0xab, 0x23, 0x33, 0x4e, 0xec, + 0xc8, 0x8c, 0xf0, 0xad, 0x54, 0x20, 0xcb, 0x4d, 0x72, 0x24, 0x9c, 0x58, 0x91, 0xc8, 0x09, 0x7e, + 0x0c, 0xe6, 0x14, 0x17, 0x4a, 0xb4, 0xd3, 0x02, 0x59, 0x1f, 0x72, 0x52, 0x13, 0xe8, 0x1b, 0x50, + 0x0c, 0x12, 0x00, 0xc1, 0x6d, 0xa6, 0x95, 0x3b, 0x1a, 0x37, 0xd2, 0xcc, 0x27, 0xe1, 0x02, 0x6a, + 0x44, 0x83, 0xab, 0xd1, 0x2a, 0x1e, 0x8d, 0x1b, 0x12, 0xa0, 0x42, 0x29, 0xba, 0x00, 0xe6, 0x3e, + 0x8f, 0x4f, 0x5c, 0x04, 0x66, 0xab, 0x70, 0x34, 0x6e, 0x88, 0x39, 0x11, 0x7f, 0xf1, 0x06, 0x94, + 0xb7, 0x68, 0xd7, 0x6a, 0x8f, 0xd4, 0xa1, 0x15, 0x4d, 0x8e, 0x1f, 0x68, 0x68, 0x1a, 0x8f, 0x40, + 0x39, 0x38, 0xf1, 0x9d, 0xbe, 0xaf, 0x5e, 0x43, 0x29, 0x80, 0xbd, 0xea, 0xe3, 0x5f, 0x19, 0xa0, + 0x6c, 0x00, 0xe1, 0x48, 0x56, 0xc1, 0xfd, 0x17, 0x1c, 0x8d, 0x1b, 0x0a, 0xa2, 0x93, 0x06, 0xf4, + 0x3c, 0xe4, 0x7d, 0x71, 0x22, 0x27, 0x96, 0x34, 0x2d, 0xb1, 0xd0, 0x3a, 0xc5, 0x4d, 0xe4, 0x68, + 0xdc, 0xd0, 0x88, 0x44, 0x0f, 0xd0, 0x4a, 0x2c, 0xf0, 0x4a, 0xc6, 0xe6, 0x8f, 0xc6, 0x8d, 0x08, + 0x34, 0x1a, 0x88, 0xf1, 0xe7, 0x06, 0x94, 0x76, 0x2c, 0x3b, 0x30, 0xa1, 0xaa, 0x56, 0x51, 0xe8, + 0x5f, 0x25, 0x80, 0x5b, 0x62, 0x87, 0xf6, 0xac, 0xd1, 0x75, 0xd7, 0x13, 0x74, 0xe7, 0x48, 0x30, + 0x0f, 0x63, 0xa5, 0x39, 0x31, 0x56, 0x66, 0x67, 0x77, 0xc7, 0x5f, 0xad, 0xf3, 0xbb, 0x61, 0x16, + 0xd2, 0x0b, 0x19, 0xfc, 0x47, 0x03, 0xca, 0x92, 0x79, 0x65, 0x79, 0x3f, 0x82, 0x9c, 0x94, 0x8d, + 0x60, 0xff, 0xff, 0x38, 0xa6, 0x4b, 0xb3, 0x38, 0x25, 0x45, 0x13, 0xbd, 0x08, 0xf3, 0x1d, 0xcf, + 0x1d, 0x0c, 0x68, 0x67, 0x5b, 0xb9, 0xbf, 0x74, 0xd2, 0xfd, 0xad, 0x47, 0xd7, 0x49, 0x02, 0x1d, + 0xff, 0xc3, 0x80, 0x39, 0xe5, 0x4c, 0x94, 0xba, 0x02, 0x11, 0x1b, 0x27, 0x8e, 0x78, 0xe9, 0x59, + 0x23, 0xde, 0x59, 0xc8, 0x75, 0x3d, 0x77, 0x38, 0xd0, 0x0e, 0x49, 0xcd, 0x66, 0x8b, 0x84, 0xf8, + 0x06, 0xcc, 0x6b, 0x56, 0xa6, 0x78, 0xd4, 0x5a, 0xd2, 0xa3, 0x6e, 0x76, 0xa8, 0xc3, 0xec, 0x3d, + 0x3b, 0xf0, 0x91, 0x0a, 0x1f, 0xff, 0xd4, 0x80, 0x85, 0x24, 0x0a, 0x5a, 0x4f, 0x24, 0xf0, 0x8f, + 0x4e, 0x27, 0x17, 0xcd, 0xdd, 0x35, 0x69, 0x95, 0xc1, 0x3f, 0x7d, 0xbf, 0x0c, 0xbe, 0x12, 0x75, + 0x32, 0x45, 0xe5, 0x15, 0xf0, 0x2f, 0x0c, 0x98, 0x8b, 0xe9, 0x12, 0x3d, 0x0b, 0xe6, 0x9e, 0xe7, + 0xf6, 0x67, 0x52, 0x94, 0xd8, 0x81, 0xbe, 0x0d, 0x69, 0xe6, 0xce, 0xa4, 0xa6, 0x34, 0x73, 0xb9, + 0x96, 0x14, 0xfb, 0x19, 0x99, 0x1f, 0xcb, 0x19, 0x7e, 0x1a, 0x8a, 0x82, 0xa1, 0x5b, 0x96, 0xed, + 0x4d, 0x0c, 0x18, 0x93, 0x19, 0x7a, 0x1e, 0x4e, 0x49, 0x67, 0x38, 0x79, 0x73, 0x79, 0xd2, 0xe6, + 0xb2, 0xde, 0x7c, 0x1e, 0xb2, 0x6b, 0xfb, 0x43, 0xe7, 0x80, 0x6f, 0xe9, 0x58, 0xcc, 0xd2, 0x5b, + 0xf8, 0x18, 0x9f, 0x81, 0x45, 0xfe, 0x06, 0xa9, 0xe7, 0xaf, 0xb9, 0x43, 0x87, 0xe9, 0xfa, 0xe4, + 0x32, 0x54, 0xe2, 0x60, 0x65, 0x25, 0x15, 0xc8, 0xb6, 0x39, 0x40, 0xd0, 0x98, 0x23, 0x72, 0x82, + 0x7f, 0x6b, 0x00, 0xda, 0xa0, 0x4c, 0x9c, 0xb2, 0xb9, 0x1e, 0x3c, 0x8f, 0x1a, 0x14, 0xfa, 0x16, + 0x6b, 
0xef, 0x53, 0xcf, 0xd7, 0xf9, 0x8b, 0x9e, 0x7f, 0x1d, 0xc9, 0x22, 0xbe, 0x02, 0x8b, 0xb1, + 0x5b, 0x2a, 0x9e, 0x6a, 0x50, 0x68, 0x2b, 0x98, 0x0a, 0x79, 0xc1, 0x1c, 0xff, 0x29, 0x0d, 0x05, + 0xb1, 0x81, 0xd0, 0x3d, 0x74, 0x05, 0x4a, 0x7b, 0xb6, 0xd3, 0xa5, 0xde, 0xc0, 0xb3, 0x95, 0x08, + 0xcc, 0xd6, 0xa9, 0xa3, 0x71, 0x23, 0x0a, 0x26, 0xd1, 0x09, 0x7a, 0x02, 0xf2, 0x43, 0x9f, 0x7a, + 0xef, 0xd8, 0xf2, 0xa5, 0x17, 0x5b, 0x95, 0xc3, 0x71, 0x23, 0xf7, 0x86, 0x4f, 0xbd, 0xcd, 0x75, + 0x1e, 0x7c, 0x86, 0x62, 0x44, 0xe4, 0x6f, 0x07, 0xbd, 0xa2, 0xcc, 0x54, 0x24, 0x70, 0xad, 0xef, + 0xf0, 0xeb, 0x27, 0x5c, 0xdd, 0xc0, 0x73, 0xfb, 0x94, 0xed, 0xd3, 0xa1, 0xdf, 0x6c, 0xbb, 0xfd, + 0xbe, 0xeb, 0x34, 0x45, 0xc5, 0x2d, 0x98, 0xe6, 0x11, 0x94, 0x6f, 0x57, 0x96, 0xbb, 0x03, 0x79, + 0xb6, 0xef, 0xb9, 0xc3, 0xee, 0xbe, 0x08, 0x0c, 0x99, 0xd6, 0xd5, 0xd9, 0xe9, 0x69, 0x0a, 0x44, + 0x0f, 0xd0, 0x23, 0x5c, 0x5a, 0xb4, 0x7d, 0xe0, 0x0f, 0xfb, 0xb2, 0xc6, 0x6b, 0x65, 0x8f, 0xc6, + 0x0d, 0xe3, 0x09, 0x12, 0x80, 0xf1, 0x4f, 0xd2, 0xd0, 0x88, 0x94, 0xc6, 0xd7, 0x5d, 0xef, 0x55, + 0xca, 0x3c, 0xbb, 0x7d, 0xd3, 0xea, 0x53, 0x6d, 0x1b, 0x0d, 0x28, 0xf5, 0x05, 0xf0, 0x9d, 0xc8, + 0x13, 0x80, 0x7e, 0x80, 0x87, 0x2e, 0x02, 0x88, 0x37, 0x23, 0xd7, 0xe5, 0x6b, 0x28, 0x0a, 0x88, + 0x58, 0x5e, 0x8b, 0x49, 0xaa, 0x39, 0x23, 0x67, 0x4a, 0x42, 0x9b, 0x49, 0x09, 0xcd, 0x4c, 0x27, + 0x10, 0x4b, 0xd4, 0xd6, 0xb3, 0x71, 0x5b, 0xc7, 0xff, 0x34, 0xa0, 0xbe, 0xa5, 0x6f, 0x7e, 0x42, + 0x71, 0x68, 0x7e, 0xd3, 0x0f, 0x88, 0xdf, 0xcc, 0x97, 0xe3, 0x17, 0xd7, 0x01, 0xb6, 0x6c, 0x87, + 0x5e, 0xb7, 0x7b, 0x8c, 0x7a, 0x13, 0xaa, 0x98, 0x9f, 0x67, 0x42, 0x97, 0x40, 0xe8, 0x9e, 0xe6, + 0x73, 0x2d, 0xe2, 0x87, 0x1f, 0x04, 0x1b, 0xe9, 0x07, 0xa8, 0xb6, 0x4c, 0xc2, 0x45, 0x39, 0x90, + 0xdf, 0x13, 0xec, 0xc9, 0x90, 0x1a, 0x6b, 0xc4, 0x84, 0xbc, 0xb7, 0xbe, 0xa7, 0x0e, 0x7f, 0xe6, + 0x3e, 0x19, 0x91, 0x68, 0x8f, 0x35, 0xfd, 0x91, 0xc3, 0xac, 0xf7, 0x22, 0xfb, 0x89, 0x3e, 0x04, + 0x59, 0x2a, 0xe9, 0xca, 0x4e, 0x4c, 0xba, 0x5e, 0x50, 0xc7, 0x7c, 0xa9, 0xaa, 0xf3, 0x85, 0xd0, + 0x03, 0x0a, 0xa5, 0x28, 0x0f, 0xf8, 0x28, 0x98, 0x1e, 0xdd, 0xd3, 0xa1, 0x1a, 0x85, 0x27, 0x07, + 0x98, 0x62, 0x1d, 0xff, 0xd5, 0x80, 0x85, 0x0d, 0xca, 0xe2, 0x49, 0xd0, 0x43, 0xa4, 0x52, 0xfc, + 0x32, 0x9c, 0x8e, 0xdc, 0x5f, 0x71, 0xff, 0x54, 0x22, 0xf3, 0x39, 0x13, 0xf2, 0xbf, 0xe9, 0x74, + 0xe8, 0x7b, 0xaa, 0xa0, 0x8c, 0x27, 0x3d, 0xb7, 0xa0, 0x14, 0x59, 0x44, 0xd7, 0x12, 0xe9, 0xce, + 0x62, 0xa2, 0x5f, 0xc9, 0x43, 0x76, 0xab, 0xa2, 0x78, 0x92, 0x65, 0xa3, 0x4a, 0x66, 0x83, 0xd4, + 0x60, 0x1b, 0x90, 0x50, 0x97, 0x20, 0x1b, 0x0d, 0x4e, 0x02, 0xfa, 0x4a, 0x90, 0xf7, 0x04, 0x73, + 0xf4, 0x08, 0x98, 0x9e, 0x7b, 0x57, 0xe7, 0xb1, 0x73, 0xe1, 0x91, 0xc4, 0xbd, 0x4b, 0xc4, 0x12, + 0x7e, 0x1e, 0x32, 0xc4, 0xbd, 0x8b, 0xea, 0x00, 0x9e, 0xe5, 0x74, 0xe9, 0xed, 0xa0, 0x82, 0x2a, + 0x93, 0x08, 0x64, 0x4a, 0xe2, 0xb0, 0x06, 0xa7, 0xa3, 0x37, 0x92, 0xea, 0x5e, 0x81, 0xfc, 0xeb, + 0xc3, 0xa8, 0xb8, 0x2a, 0x09, 0x71, 0xc9, 0x42, 0x5d, 0x23, 0x71, 0x9b, 0x81, 0x10, 0x8e, 0x2e, + 0x40, 0x91, 0x59, 0xbb, 0x3d, 0x7a, 0x33, 0x74, 0x73, 0x21, 0x80, 0xaf, 0xf2, 0xe2, 0xef, 0x76, + 0x24, 0x03, 0x0a, 0x01, 0xe8, 0x71, 0x58, 0x08, 0xef, 0x7c, 0xcb, 0xa3, 0x7b, 0xf6, 0x7b, 0x42, + 0xc3, 0x65, 0x72, 0x0c, 0x8e, 0x96, 0xe1, 0x54, 0x08, 0xdb, 0x16, 0x99, 0x86, 0x29, 0x50, 0x93, + 0x60, 0x2e, 0x1b, 0xc1, 0xee, 0x4b, 0x77, 0x86, 0x56, 0x4f, 0x3c, 0xbe, 0x32, 0x89, 0x40, 0xf0, + 0xdf, 0x0c, 0x38, 0x2d, 0x55, 0xcd, 0x2c, 0xf6, 0x50, 0x5a, 0xfd, 0xef, 0x0c, 0x40, 0x51, 0x0e, + 0x94, 0x69, 0x7d, 0x33, 0xda, 
0x08, 0xe2, 0xa9, 0x4c, 0x49, 0xd4, 0xb4, 0x12, 0x14, 0xf6, 0x72, + 0x30, 0xe4, 0x44, 0x3a, 0x24, 0x8b, 0x6b, 0x53, 0x16, 0xcd, 0x12, 0x42, 0xd4, 0x2f, 0xaf, 0xf5, + 0x77, 0x47, 0x8c, 0xfa, 0xaa, 0xe4, 0x15, 0xb5, 0xbe, 0x00, 0x10, 0xf9, 0xc3, 0xcf, 0xa2, 0x0e, + 0x13, 0x56, 0x63, 0x86, 0x67, 0x29, 0x10, 0xd1, 0x03, 0xfc, 0x87, 0x34, 0xcc, 0xdd, 0x76, 0x7b, + 0xc3, 0x30, 0x30, 0x3e, 0x4c, 0x01, 0x23, 0x56, 0x87, 0x67, 0x75, 0x1d, 0x8e, 0xc0, 0xf4, 0x19, + 0x1d, 0x08, 0xcb, 0xca, 0x10, 0x31, 0x46, 0x18, 0xca, 0xcc, 0xf2, 0xba, 0x94, 0xc9, 0xea, 0xa6, + 0x9a, 0x13, 0x69, 0x67, 0x0c, 0x86, 0x96, 0xa0, 0x64, 0x75, 0xbb, 0x1e, 0xed, 0x5a, 0x8c, 0xb6, + 0x46, 0xd5, 0xbc, 0x38, 0x2c, 0x0a, 0xc2, 0x6f, 0xc1, 0xbc, 0x16, 0x96, 0x52, 0xe9, 0x93, 0x90, + 0x7f, 0x57, 0x40, 0x26, 0xf4, 0xc5, 0x24, 0xaa, 0x72, 0x63, 0x1a, 0x2d, 0xde, 0x67, 0xd7, 0x77, + 0xc6, 0x37, 0x20, 0x27, 0xd1, 0xd1, 0x85, 0x68, 0x8d, 0x22, 0x9b, 0x34, 0x7c, 0xae, 0x0a, 0x0e, + 0x0c, 0x39, 0x49, 0x48, 0x29, 0x5e, 0xd8, 0x86, 0x84, 0x10, 0xf5, 0x8b, 0xff, 0x6b, 0xc0, 0x99, + 0x75, 0xca, 0x68, 0x9b, 0xd1, 0xce, 0x75, 0x9b, 0xf6, 0x3a, 0x5f, 0x6b, 0xf9, 0x1c, 0x34, 0xc1, + 0x32, 0x91, 0x26, 0x18, 0xf7, 0x3b, 0x3d, 0xdb, 0xa1, 0x5b, 0x91, 0x2e, 0x4a, 0x08, 0xe0, 0x1e, + 0x62, 0x8f, 0x5f, 0x5c, 0x2e, 0xcb, 0x0f, 0x1b, 0x11, 0x48, 0xa0, 0xe1, 0x5c, 0xa8, 0x61, 0x6c, + 0xc3, 0xd9, 0x24, 0xd3, 0x4a, 0x47, 0x4d, 0xc8, 0x89, 0xbd, 0x13, 0xda, 0xaf, 0xb1, 0x1d, 0x44, + 0xa1, 0x25, 0x8e, 0x4f, 0x27, 0x8f, 0xc7, 0x3f, 0xe3, 0xd5, 0x6e, 0x74, 0xa7, 0x50, 0x2a, 0x37, + 0x22, 0xe5, 0x60, 0xe5, 0x04, 0x7d, 0x0b, 0x4c, 0x36, 0x1a, 0x28, 0xbf, 0xda, 0x3a, 0xf3, 0xf9, + 0xb8, 0x71, 0x3a, 0xb6, 0x6d, 0x67, 0x34, 0xa0, 0x44, 0xa0, 0x70, 0xdb, 0x6b, 0x5b, 0x5e, 0xc7, + 0x76, 0xac, 0x9e, 0xcd, 0xa4, 0xac, 0x4c, 0x12, 0x05, 0xa1, 0x8b, 0x90, 0xf3, 0x0f, 0x28, 0x6b, + 0xcb, 0xcc, 0xb9, 0xac, 0x8b, 0x00, 0x05, 0xc4, 0xbf, 0x89, 0x28, 0x5d, 0xda, 0xf3, 0x09, 0x95, + 0x6e, 0x9c, 0x58, 0xe9, 0xc6, 0x7d, 0x94, 0x8e, 0x7f, 0x10, 0xaa, 0x48, 0x5f, 0x51, 0xa9, 0xe8, + 0x45, 0x98, 0xef, 0xc4, 0x56, 0xa6, 0xab, 0x4a, 0xf6, 0x4e, 0x13, 0xe8, 0x78, 0x23, 0xd4, 0x88, + 0x80, 0x4c, 0xd1, 0x48, 0x42, 0xcc, 0xe9, 0x63, 0x62, 0x7e, 0xfc, 0x51, 0x28, 0x06, 0x5f, 0x98, + 0x50, 0x09, 0xf2, 0xd7, 0x5f, 0x23, 0x6f, 0x5e, 0x23, 0xeb, 0x0b, 0x29, 0x54, 0x86, 0x42, 0xeb, + 0xda, 0xda, 0x2b, 0x62, 0x66, 0xac, 0x7e, 0x90, 0xd3, 0x61, 0xd9, 0x43, 0xdf, 0x85, 0xac, 0x8c, + 0xb5, 0x67, 0xc3, 0xeb, 0x46, 0x3f, 0xe4, 0xd4, 0xce, 0x1d, 0x83, 0x4b, 0xbe, 0x71, 0xea, 0x49, + 0x03, 0xdd, 0x84, 0x92, 0x00, 0xaa, 0xb6, 0xeb, 0x85, 0x64, 0xf7, 0x33, 0x46, 0xe9, 0xe2, 0x94, + 0xd5, 0x08, 0xbd, 0xab, 0x90, 0x95, 0x22, 0x38, 0x9b, 0x48, 0x89, 0x26, 0xdc, 0x26, 0xd6, 0x88, + 0xc6, 0x29, 0xf4, 0x1c, 0x98, 0x3b, 0x96, 0xdd, 0x43, 0x91, 0x8c, 0x2c, 0xd2, 0x2d, 0xad, 0x9d, + 0x4d, 0x82, 0x23, 0xc7, 0xbe, 0x10, 0x34, 0x7d, 0xcf, 0x25, 0x3b, 0x4f, 0x7a, 0x7b, 0xf5, 0xf8, + 0x42, 0x70, 0xf2, 0x6b, 0xb2, 0x35, 0xa9, 0xfb, 0x1f, 0xe8, 0x62, 0xfc, 0xa8, 0x44, 0xbb, 0xa4, + 0x56, 0x9f, 0xb6, 0x1c, 0x10, 0xdc, 0x82, 0x52, 0xa4, 0xf7, 0x10, 0x15, 0xeb, 0xf1, 0xc6, 0x49, + 0x54, 0xac, 0x13, 0x1a, 0x16, 0x38, 0x85, 0x36, 0xa0, 0xc0, 0xf3, 0x58, 0xf1, 0x8d, 0xe2, 0x7c, + 0x32, 0x5d, 0x8d, 0xa4, 0x29, 0xb5, 0x0b, 0x93, 0x17, 0x03, 0x42, 0xdf, 0x87, 0xe2, 0x06, 0x65, + 0xca, 0xd7, 0x9f, 0x4b, 0x06, 0x8b, 0x09, 0x92, 0x8a, 0x07, 0x1c, 0x9c, 0x42, 0x6f, 0x89, 0x94, + 0x3a, 0xee, 0xeb, 0x50, 0x63, 0x8a, 0x4f, 0x0b, 0xee, 0xb5, 0x34, 0x1d, 0x21, 0xa0, 0xfc, 0x66, + 0x8c, 0xb2, 0x8a, 0x8a, 0x8d, 0x29, 0x4f, 0x30, 0xa0, 
0xdc, 0xb8, 0xcf, 0x7f, 0x0a, 0xe0, 0xd4, + 0xea, 0xdb, 0xfa, 0x63, 0xf9, 0xba, 0xc5, 0x2c, 0xf4, 0x1a, 0xcc, 0x0b, 0x59, 0x06, 0x5f, 0xd3, + 0x63, 0x36, 0x7f, 0xec, 0xd3, 0x7d, 0xcc, 0xe6, 0x8f, 0x7f, 0xc2, 0xc7, 0xa9, 0xd6, 0xdb, 0x1f, + 0xdd, 0xab, 0xa7, 0x3e, 0xbe, 0x57, 0x4f, 0x7d, 0x76, 0xaf, 0x6e, 0xfc, 0xf8, 0xb0, 0x6e, 0xfc, + 0xfe, 0xb0, 0x6e, 0x7c, 0x78, 0x58, 0x37, 0x3e, 0x3a, 0xac, 0x1b, 0xff, 0x3e, 0xac, 0x1b, 0xff, + 0x39, 0xac, 0xa7, 0x3e, 0x3b, 0xac, 0x1b, 0xef, 0x7f, 0x5a, 0x4f, 0x7d, 0xf4, 0x69, 0x3d, 0xf5, + 0xf1, 0xa7, 0xf5, 0xd4, 0x0f, 0x1f, 0xbb, 0x7f, 0xf9, 0x28, 0x1d, 0x5d, 0x4e, 0xfc, 0x3c, 0xf5, + 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0x3c, 0x9c, 0xf7, 0xd2, 0x21, 0x00, 0x00, } func (x Direction) String() string { @@ -4800,6 +4819,9 @@ func (this *DetectedFieldsResponse) Equal(that interface{}) bool { return false } } + if this.FieldLimit != that1.FieldLimit { + return false + } return true } func (this *DetectedField) Equal(that interface{}) bool { @@ -4830,6 +4852,9 @@ func (this *DetectedField) Equal(that interface{}) bool { if this.Cardinality != that1.Cardinality { return false } + if !bytes.Equal(this.Sketch, that1.Sketch) { + return false + } return true } func (this *DetectedLabelsRequest) Equal(that interface{}) bool { @@ -5560,11 +5585,12 @@ func (this *DetectedFieldsResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&logproto.DetectedFieldsResponse{") if this.Fields != nil { s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") } + s = append(s, "FieldLimit: "+fmt.Sprintf("%#v", this.FieldLimit)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -5572,11 +5598,12 @@ func (this *DetectedField) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&logproto.DetectedField{") s = append(s, "Label: "+fmt.Sprintf("%#v", this.Label)+",\n") s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Sketch: "+fmt.Sprintf("%#v", this.Sketch)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -8420,6 +8447,11 @@ func (m *DetectedFieldsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.FieldLimit != 0 { + i = encodeVarintLogproto(dAtA, i, uint64(m.FieldLimit)) + i-- + dAtA[i] = 0x10 + } if len(m.Fields) > 0 { for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { { @@ -8457,6 +8489,13 @@ func (m *DetectedField) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Sketch) > 0 { + i -= len(m.Sketch) + copy(dAtA[i:], m.Sketch) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Sketch))) + i-- + dAtA[i] = 0x22 + } if m.Cardinality != 0 { i = encodeVarintLogproto(dAtA, i, uint64(m.Cardinality)) i-- @@ -9581,6 +9620,9 @@ func (m *DetectedFieldsResponse) Size() (n int) { n += 1 + l + sovLogproto(uint64(l)) } } + if m.FieldLimit != 0 { + n += 1 + sovLogproto(uint64(m.FieldLimit)) + } return n } @@ -9601,6 +9643,10 @@ func (m *DetectedField) Size() (n int) { if m.Cardinality != 0 { n += 1 + sovLogproto(uint64(m.Cardinality)) } + l = len(m.Sketch) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -10317,6 +10363,7 @@ func (this *DetectedFieldsResponse) String() string { repeatedStringForFields += "}" s := strings.Join([]string{`&DetectedFieldsResponse{`, `Fields:` + repeatedStringForFields + `,`, + `FieldLimit:` + 
fmt.Sprintf("%v", this.FieldLimit) + `,`, `}`, }, "") return s @@ -10329,6 +10376,7 @@ func (this *DetectedField) String() string { `Label:` + fmt.Sprintf("%v", this.Label) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Sketch:` + fmt.Sprintf("%v", this.Sketch) + `,`, `}`, }, "") return s @@ -16884,6 +16932,25 @@ func (m *DetectedFieldsResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldLimit", wireType) + } + m.FieldLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FieldLimit |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -17020,6 +17087,40 @@ func (m *DetectedField) Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sketch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sketch = append(m.Sketch[:0], dAtA[iNdEx:postIndex]...) + if m.Sketch == nil { + m.Sketch = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index b4986750a2ab6..487eea41dce0b 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -452,12 +452,16 @@ message DetectedFieldsRequest { message DetectedFieldsResponse { repeated DetectedField fields = 1; + uint32 fieldLimit = 2; } +// TODO: make the detected field include the serialized sketch +// we only want cardinality in the JSON response message DetectedField { string label = 1; string type = 2 [(gogoproto.casttype) = "DetectedFieldType"]; uint64 cardinality = 3; + bytes sketch = 4 [(gogoproto.jsontag) = "-"]; //serialized hyperloglog sketch } message DetectedLabelsRequest { diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 6696f76d45b13..843fd5054fb85 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1085,7 +1085,6 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { t.Server.HTTP.Path("/loki/api/v1/labels").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/label/{name}/values").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/series").Methods("GET", "POST").Handler(frontendHandler) - t.Server.HTTP.Path("/loki/api/v1/detected_fields").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/patterns").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/detected_labels").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/index/stats").Methods("GET", "POST").Handler(frontendHandler) @@ -1105,6 +1104,12 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { t.Server.HTTP.Path("/api/prom/tail").Methods("GET", 
"POST").Handler(defaultHandler) } + // We don't marshal the hyperloglog sketch in the detected fields response to JSON, so this endpoint + // only works correctly in V2 with protobuf encoding enabled. + if frontendV2 != nil && frontendV2.IsProtobufEncoded() { + t.Server.HTTP.Path("/loki/api/v1/detected_fields").Methods("GET", "POST").Handler(frontendHandler) + } + if t.frontend == nil { return services.NewIdleService(nil, func(_ error) error { if t.stopper != nil { diff --git a/pkg/lokifrontend/frontend/v2/frontend.go b/pkg/lokifrontend/frontend/v2/frontend.go index 5311573020735..dae27ec94682c 100644 --- a/pkg/lokifrontend/frontend/v2/frontend.go +++ b/pkg/lokifrontend/frontend/v2/frontend.go @@ -446,6 +446,14 @@ func (f *Frontend) CheckReady(_ context.Context) error { return errors.New(msg) } +func (f *Frontend) IsProtobufEncoded() bool { + return f.cfg.Encoding == EncodingProtobuf +} + +func (f *Frontend) IsJSONEncoded() bool { + return f.cfg.Encoding == EncodingJSON +} + const stripeSize = 1 << 6 type requestsInProgress struct { diff --git a/pkg/querier/http.go b/pkg/querier/http.go index 5d9f216a1463b..d4e6291532035 100644 --- a/pkg/querier/http.go +++ b/pkg/querier/http.go @@ -386,7 +386,8 @@ func (q *QuerierAPI) DetectedFieldsHandler(ctx context.Context, req *logproto.De "msg", "queried store for detected fields that does not support it, no response from querier.DetectedFields", ) return &logproto.DetectedFieldsResponse{ - Fields: []*logproto.DetectedField{}, + Fields: []*logproto.DetectedField{}, + FieldLimit: req.GetFieldLimit(), }, nil } return resp, nil diff --git a/pkg/querier/multi_tenant_querier.go b/pkg/querier/multi_tenant_querier.go index 12cb412a61cf9..54897200036d8 100644 --- a/pkg/querier/multi_tenant_querier.go +++ b/pkg/querier/multi_tenant_querier.go @@ -278,7 +278,8 @@ func (q *MultiTenantQuerier) DetectedFields(ctx context.Context, req *logproto.D ) return &logproto.DetectedFieldsResponse{ - Fields: []*logproto.DetectedField{}, + Fields: []*logproto.DetectedField{}, + FieldLimit: req.GetFieldLimit(), }, nil } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 12c68221e0d87..df75af9172921 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -1018,27 +1018,39 @@ func (q *SingleTenantQuerier) DetectedFields(ctx context.Context, req *logproto. // TODO(twhitney): converting from a step to a duration should be abstracted and reused, // doing this in a few places now. 
- streams, err := streamsForFieldDetection(iters, req.LineLimit, time.Duration(req.Step*1e6)) + streams, err := streamsForFieldDetection(iters, req.LineLimit, time.Duration(req.Step)) if err != nil { return nil, err } detectedFields := parseDetectedFields(ctx, req.FieldLimit, streams) + //TODO: detected field needs to contain the sketch + // make sure response to frontend is GRPC + //only want cardinality in JSON fields := make([]*logproto.DetectedField, len(detectedFields)) fieldCount := 0 for k, v := range detectedFields { + sketch, err := v.sketch.MarshalBinary() + if err != nil { + level.Warn(q.logger).Log("msg", "failed to marshal hyperloglog sketch", "err", err) + continue + } + fields[fieldCount] = &logproto.DetectedField{ Label: k, Type: v.fieldType, Cardinality: v.Estimate(), + Sketch: sketch, } fieldCount++ } + //TODO: detected fields response needs to include the sketch return &logproto.DetectedFieldsResponse{ - Fields: fields, + Fields: fields, + FieldLimit: req.GetFieldLimit(), }, nil } @@ -1064,6 +1076,10 @@ func (p *parsedFields) Estimate() uint64 { return p.sketch.Estimate() } +func (p *parsedFields) Marshal() ([]byte, error) { + return p.sketch.MarshalBinary() +} + func (p *parsedFields) DetermineType(value string) { p.fieldType = determineType(value) p.isTypeDetected = true @@ -1098,7 +1114,6 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S fieldCount := uint32(0) for _, stream := range streams { - level.Debug(spanlogger.FromContext(ctx)).Log( "detected_fields", "true", "msg", fmt.Sprintf("looking for detected fields in stream %d with %d lines", stream.Hash, len(stream.Entries))) @@ -1106,12 +1121,15 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S for _, entry := range stream.Entries { detected := parseLine(entry.Line) for k, vals := range detected { - if fieldCount >= limit { - return detectedFields + df, ok := detectedFields[k] + if !ok && fieldCount < limit { + df = newParsedFields() + detectedFields[k] = df + fieldCount++ } - if _, ok := detectedFields[k]; !ok { - detectedFields[k] = newParsedFields() + if df == nil { + continue } for _, v := range vals { @@ -1126,8 +1144,6 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S level.Debug(spanlogger.FromContext(ctx)).Log( "detected_fields", "true", "msg", fmt.Sprintf("detected field %s with %d values", k, len(vals))) - - fieldCount++ } } } diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 85a77fe859f16..09997156128ef 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -17,6 +17,7 @@ import ( "golang.org/x/exp/maps" "github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache" + "github.com/grafana/loki/v3/pkg/storage/detected" "github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume" "github.com/grafana/dskit/httpgrpc" @@ -972,9 +973,12 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht return req.WithContext(ctx), nil case *DetectedFieldsRequest: params := url.Values{ - "start": []string{fmt.Sprintf("%d", request.Start.UnixNano())}, - "end": []string{fmt.Sprintf("%d", request.End.UnixNano())}, - "query": []string{request.GetQuery()}, + "query": []string{request.GetQuery()}, + "start": []string{fmt.Sprintf("%d", request.Start.UnixNano())}, + "end": []string{fmt.Sprintf("%d", request.End.UnixNano())}, + "line_limit": []string{fmt.Sprintf("%d", request.GetLineLimit())}, + 
"field_limit": []string{fmt.Sprintf("%d", request.GetFieldLimit())}, + "step": []string{fmt.Sprintf("%d", request.GetStep())}, } u := &url.URL{ @@ -1587,6 +1591,29 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) (queryrangebase Response: seriesvolume.Merge(resps, resp0.Response.Limit), Headers: headers, }, nil + case *DetectedFieldsResponse: + resp0 := responses[0].(*DetectedFieldsResponse) + headers := resp0.Headers + fieldLimit := resp0.Response.GetFieldLimit() + + fields := []*logproto.DetectedField{} + for _, r := range responses { + fields = append(fields, r.(*DetectedFieldsResponse).Response.Fields...) + } + + mergedFields, err := detected.MergeFields(fields, fieldLimit) + + if err != nil { + return nil, err + } + + return &DetectedFieldsResponse{ + Response: &logproto.DetectedFieldsResponse{ + Fields: mergedFields, + FieldLimit: fieldLimit, + }, + Headers: headers, + }, nil default: return nil, fmt.Errorf("unknown response type (%T) in merging responses", responses[0]) } @@ -1781,8 +1808,12 @@ func ParamsFromRequest(req queryrangebase.Request) (logql.Params, error) { return ¶msStatsWrapper{ IndexStatsRequest: r, }, nil + case *DetectedFieldsRequest: + return ¶msDetectedFieldsWrapper{ + DetectedFieldsRequest: r, + }, nil default: - return nil, fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, got (%T)", r) + return nil, fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, *DetectedFieldsRequest, got (%T)", r) } } @@ -1950,6 +1981,47 @@ func (p paramsStatsWrapper) Shards() []string { return make([]string, 0) } +type paramsDetectedFieldsWrapper struct { + *DetectedFieldsRequest +} + +func (p paramsDetectedFieldsWrapper) QueryString() string { + return p.GetQuery() +} + +func (p paramsDetectedFieldsWrapper) GetExpression() syntax.Expr { + expr, err := syntax.ParseExpr(p.GetQuery()) + if err != nil { + return nil + } + + return expr +} + +func (p paramsDetectedFieldsWrapper) Start() time.Time { + return p.GetStartTs() +} + +func (p paramsDetectedFieldsWrapper) End() time.Time { + return p.GetEndTs() +} + +func (p paramsDetectedFieldsWrapper) Step() time.Duration { + return time.Duration(p.GetStep() * 1e6) +} + +func (p paramsDetectedFieldsWrapper) Interval() time.Duration { + return 0 +} + +func (p paramsDetectedFieldsWrapper) Direction() logproto.Direction { + return logproto.BACKWARD +} +func (p paramsDetectedFieldsWrapper) Limit() uint32 { return p.DetectedFieldsRequest.LineLimit } +func (p paramsDetectedFieldsWrapper) Shards() []string { + return make([]string, 0) +} + func httpResponseHeadersToPromResponseHeaders(httpHeaders http.Header) []queryrangebase.PrometheusResponseHeader { var promHeaders []queryrangebase.PrometheusResponseHeader for h, hv := range httpHeaders { @@ -2071,12 +2143,15 @@ type DetectedFieldsRequest struct { path string } -func NewDetectedFieldsRequest(start, end time.Time, query, path string) *DetectedFieldsRequest { +func NewDetectedFieldsRequest(start, end time.Time, lineLimit, fieldLimit uint32, step int64, query, path string) *DetectedFieldsRequest { return &DetectedFieldsRequest{ DetectedFieldsRequest: logproto.DetectedFieldsRequest{ - Start: start, - End: end, - Query: query, + Start: start, + End: end, + Query: query, + LineLimit: lineLimit, + FieldLimit: fieldLimit, + Step: step, }, path: path, } @@ -2103,7 +2178,15 @@ func (r *DetectedFieldsRequest) GetStartTs() time.Time { } func (r *DetectedFieldsRequest) 
GetStep() int64 { - return 0 + return r.Step +} + +func (r *DetectedFieldsRequest) GetLineLimit() uint32 { + return r.LineLimit +} + +func (r *DetectedFieldsRequest) GetFieldLimit() uint32 { + return r.FieldLimit } func (r *DetectedFieldsRequest) Path() string { @@ -2132,6 +2215,11 @@ func (r *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("start", timestamp.Time(r.GetStart().UnixNano()).String()), otlog.String("end", timestamp.Time(r.GetEnd().UnixNano()).String()), + otlog.String("query", r.GetQuery()), + otlog.Int64("step (ms)", r.GetStep()), + otlog.Int64("line_limit", int64(r.GetLineLimit())), + otlog.Int64("field_limit", int64(r.GetFieldLimit())), + otlog.String("step", fmt.Sprintf("%d", r.GetStep())), ) } diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index 273750f71364d..833132d5c2b91 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/axiomhq/hyperloglog" "github.com/gorilla/mux" "github.com/grafana/dskit/user" "github.com/opentracing/opentracing-go/mocktracer" @@ -1622,6 +1623,102 @@ func Test_codec_MergeResponse(t *testing.T) { } } +func Test_codec_MergeResponse_DetectedFieldsResponse(t *testing.T) { + buildDetctedField := func(label string, cardinality uint64) *logproto.DetectedField { + fooSketch := hyperloglog.New() + + for i := 0; i < int(cardinality); i++ { + fooSketch.Insert([]byte(fmt.Sprintf("value %d", i))) + } + marshalledSketch, err := fooSketch.MarshalBinary() + require.NoError(t, err) + + return &logproto.DetectedField{ + Label: label, + Type: logproto.DetectedFieldString, + Cardinality: cardinality, + Sketch: marshalledSketch, + } + } + + t.Run("merges the responses", func(t *testing.T) { + responses := []queryrangebase.Response{ + &DetectedFieldsResponse{ + Response: &logproto.DetectedFieldsResponse{ + Fields: []*logproto.DetectedField{ + buildDetctedField("foo", 1), + }, + FieldLimit: 2, + }, + }, + &DetectedFieldsResponse{ + Response: &logproto.DetectedFieldsResponse{ + Fields: []*logproto.DetectedField{ + buildDetctedField("foo", 3), + }, + FieldLimit: 2, + }, + }, + } + + got, err := DefaultCodec.MergeResponse(responses...) + require.Nil(t, err) + response := got.(*DetectedFieldsResponse).Response + require.Equal(t, 1, len(response.Fields)) + + foo := response.Fields[0] + require.Equal(t, foo.Label, "foo") + require.Equal(t, foo.Type, logproto.DetectedFieldString) + require.Equal(t, foo.Cardinality, uint64(3)) + }) + + t.Run("merges the responses, enforcing the limit", func(t *testing.T) { + responses := []queryrangebase.Response{ + &DetectedFieldsResponse{ + Response: &logproto.DetectedFieldsResponse{ + Fields: []*logproto.DetectedField{ + buildDetctedField("foo", 1), + buildDetctedField("bar", 42), + }, + FieldLimit: 2, + }, + }, + &DetectedFieldsResponse{ + Response: &logproto.DetectedFieldsResponse{ + Fields: []*logproto.DetectedField{ + buildDetctedField("foo", 27), + buildDetctedField("baz", 3), + }, + FieldLimit: 2, + }, + }, + } + + got, err := DefaultCodec.MergeResponse(responses...) 
+ require.Nil(t, err) + response := got.(*DetectedFieldsResponse).Response + require.Equal(t, 2, len(response.Fields)) + + var foo *logproto.DetectedField + var baz *logproto.DetectedField + for _, f := range response.Fields { + if f.Label == "foo" { + foo = f + } + if f.Label == "baz" { + baz = f + } + } + + require.Equal(t, foo.Label, "foo") + require.Equal(t, foo.Type, logproto.DetectedFieldString) + require.Equal(t, 27, int(foo.Cardinality)) + + require.Nil(t, baz) + }) + +} + type badResponse struct{} func (badResponse) Reset() {} diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 012d7778d42a8..228f20a514057 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -235,6 +235,21 @@ func NewMiddleware( return nil, nil, err } + detectedFieldsTripperware, err := NewDetectedFieldsTripperware( + cfg, + engineOpts, + log, + limits, + schema, + codec, + iqo, + metrics, + indexStatsTripperware, + metricsNamespace) + if err != nil { + return nil, nil, err + } + return base.MiddlewareFunc(func(next base.Handler) base.Handler { var ( metricRT = metricsTripperware.Wrap(next) @@ -245,7 +260,7 @@ func NewMiddleware( instantRT = instantMetricTripperware.Wrap(next) statsRT = indexStatsTripperware.Wrap(next) seriesVolumeRT = seriesVolumeTripperware.Wrap(next) - detectedFieldsRT = next // TODO(twhitney): add middlewares for detected fields + detectedFieldsRT = detectedFieldsTripperware.Wrap(next) detectedLabelsRT = next // TODO(shantanu): add middlewares ) @@ -372,11 +387,14 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, case *DetectedFieldsRequest: level.Info(logger).Log( "msg", "executing query", - "type", "detected fields", - "query", op.Query, + "type", "detected_fields", + "end", op.End, + "field_limit", op.FieldLimit, "length", op.End.Sub(op.Start), + "line_limit", op.LineLimit, + "query", op.Query, "start", op.Start, - "end", op.End, + "step", op.Step, ) return r.detectedFields.Do(ctx, req) @@ -1097,3 +1115,44 @@ func sharedIndexTripperware( return base.MergeMiddlewares(middlewares...).Wrap(next) }), nil } + +// NewDetectedFieldsTripperware creates a new frontend tripperware responsible for handling detected field requests, which are basically log filter requests with a bit more processing. +func NewDetectedFieldsTripperware( + cfg Config, + engineOpts logql.EngineOpts, + log log.Logger, + limits Limits, + schema config.SchemaConfig, + merger base.Merger, + iqo util.IngesterQueryOptions, + metrics *Metrics, + indexStatsTripperware base.Middleware, + metricsNamespace string, +) (base.Middleware, error) { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + statsHandler := indexStatsTripperware.Wrap(next) + + queryRangeMiddleware := []base.Middleware{ + StatsCollectorMiddleware(), + NewLimitsMiddleware(limits), + NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, newDefaultSplitter(limits, iqo), metrics.SplitByMetrics), + } + + // The sharding middleware takes care of enforcing this limit for both shardable and non-shardable queries. + // If we are not using sharding, we enforce the limit by adding this middleware after time splitting. 
+ queryRangeMiddleware = append(queryRangeMiddleware, + NewQuerierSizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), + ) + + if cfg.MaxRetries > 0 { + queryRangeMiddleware = append( + queryRangeMiddleware, base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, metricsNamespace), + ) + } + + return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) + }), nil +} diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index 92c956bbfed82..fc71742859798 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -223,6 +223,11 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que intervals[i], intervals[j] = intervals[j], intervals[i] } } + case *DetectedFieldsRequest: + limit = int64(req.LineLimit) + for i, j := 0, len(intervals)-1; i < j; i, j = i+1, j-1 { + intervals[i], intervals[j] = intervals[j], intervals[i] + } case *LokiSeriesRequest, *LabelRequest, *logproto.IndexStatsRequest, *logproto.VolumeRequest, *logproto.ShardsRequest: // Set this to 0 since this is not used in Series/Labels/Index Request. limit = 0 diff --git a/pkg/querier/queryrange/splitters.go b/pkg/querier/queryrange/splitters.go index 30bc3da18392d..42a81f6defd39 100644 --- a/pkg/querier/queryrange/splitters.go +++ b/pkg/querier/queryrange/splitters.go @@ -95,6 +95,20 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer AggregateBy: r.AggregateBy, }) } + case *DetectedFieldsRequest: + factory = func(start, end time.Time) { + reqs = append(reqs, &DetectedFieldsRequest{ + DetectedFieldsRequest: logproto.DetectedFieldsRequest{ + Start: start, + End: end, + Query: r.GetQuery(), + LineLimit: r.GetLineLimit(), + FieldLimit: r.GetFieldLimit(), + Step: r.GetStep(), + }, + path: r.path, + }) + } default: return nil, nil } diff --git a/pkg/storage/detected/fields.go b/pkg/storage/detected/fields.go new file mode 100644 index 0000000000000..b9e3c714d1ec9 --- /dev/null +++ b/pkg/storage/detected/fields.go @@ -0,0 +1,93 @@ +package detected + +import ( + "github.com/axiomhq/hyperloglog" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +type UnmarshaledDetectedField struct { + Label string + Type logproto.DetectedFieldType + Sketch *hyperloglog.Sketch +} + +func UnmarshalDetectedField(f *logproto.DetectedField) (*UnmarshaledDetectedField, error) { + sketch := hyperloglog.New() + err := sketch.UnmarshalBinary(f.Sketch) + if err != nil { + return nil, err + } + + return &UnmarshaledDetectedField{ + Label: f.Label, + Type: f.Type, + Sketch: sketch, + }, nil +} + +func (f *UnmarshaledDetectedField) Merge(df *logproto.DetectedField) error { + sketch := hyperloglog.New() + err := sketch.UnmarshalBinary(df.Sketch) + if err != nil { + return err + } + + return f.Sketch.Merge(sketch) +} + +func MergeFields( + fields []*logproto.DetectedField, + fieldLimit uint32, +) ([]*logproto.DetectedField, error) { + mergedFields := make(map[string]*UnmarshaledDetectedField, fieldLimit) + foundFields := uint32(0) + + for _, field := range fields { + if field == nil { + continue + } + + // TODO(twhitney): this will take the first N up to limit, is there a better + // way to rank the fields to make sure we get the most interesting ones? 
+ f, ok := mergedFields[field.Label] + if !ok && foundFields < fieldLimit { + unmarshaledField, err := UnmarshalDetectedField(field) + if err != nil { + return nil, err + } + + mergedFields[field.Label] = unmarshaledField + foundFields++ + continue + } + + if ok { + // seeing the same field again, merge it with the existing one + err := f.Merge(field) + if err != nil { + return nil, err + } + } + } + + result := make([]*logproto.DetectedField, 0, fieldLimit) + for _, field := range mergedFields { + // TODO(twhitney): what's the performance cost of marshalling here? We technically don't need to marshal in the merge + // but it's nice to keep the response consistent through middlewares in case we need the sketch somewhere else, + // need to benchmark this to find out. + sketch, err := field.Sketch.MarshalBinary() + if err != nil { + return nil, err + } + detectedField := &logproto.DetectedField{ + Label: field.Label, + Type: field.Type, + Cardinality: field.Sketch.Estimate(), + Sketch: sketch, + } + result = append(result, detectedField) + } + + return result, nil +} diff --git a/pkg/storage/detected/fields_test.go b/pkg/storage/detected/fields_test.go new file mode 100644 index 0000000000000..4edd7026baf68 --- /dev/null +++ b/pkg/storage/detected/fields_test.go @@ -0,0 +1,94 @@ +package detected + +import ( + "testing" + + "github.com/axiomhq/hyperloglog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func Test_MergeFields(t *testing.T) { + fooSketch := hyperloglog.New() + fooSketch.Insert([]byte("bar")) + marshalledFooSketch, err := fooSketch.MarshalBinary() + require.NoError(t, err) + + barSketch := hyperloglog.New() + barSketch.Insert([]byte("baz")) + marshalledBarSketch, err := barSketch.MarshalBinary() + require.NoError(t, err) + + otherFooSketch := hyperloglog.New() + otherFooSketch.Insert([]byte("bar")) + otherFooSketch.Insert([]byte("baz")) + otherFooSketch.Insert([]byte("qux")) + marhsalledOtherFooSketch, err := otherFooSketch.MarshalBinary() + require.NoError(t, err) + + fields := []*logproto.DetectedField{ + { + Label: "foo", + Type: logproto.DetectedFieldString, + Cardinality: 1, + Sketch: marshalledFooSketch, + }, + { + Label: "bar", + Type: logproto.DetectedFieldBoolean, + Cardinality: 2, + Sketch: marshalledBarSketch, + }, + { + Label: "foo", + Type: logproto.DetectedFieldString, + Cardinality: 3, + Sketch: marhsalledOtherFooSketch, + }, + } + + limit := uint32(3) + + t.Run("merges fields", func(t *testing.T) { + result, err := MergeFields(fields, limit) + require.NoError(t, err) + assert.Equal(t, 2, len(result)) + var foo *logproto.DetectedField + + for _, field := range result { + if field.Label == "foo" { + foo = field + } + } + + assert.Equal(t, logproto.DetectedFieldString, foo.Type) + assert.Equal(t, uint64(3), foo.Cardinality) + }) + + t.Run("returns up to limit number of fields", func(t *testing.T) { + lowLimit := uint32(1) + result, err := MergeFields(fields, lowLimit) + require.NoError(t, err) + assert.Equal(t, 1, len(result)) + + highLimit := uint32(4) + result, err = MergeFields(fields, highLimit) + require.NoError(t, err) + assert.Equal(t, 2, len(result)) + }) + + t.Run("returns an error when the field cannot be unmarshalled", func(t *testing.T) { + badFields := []*logproto.DetectedField{ + { + Label: "bad", + Type: logproto.DetectedFieldBoolean, + Cardinality: 42, + Sketch: []byte("bad"), + }, + } + _, err := MergeFields(badFields, limit) + 
require.Error(t, err) + }) +} From 41246a05ac045bf6f51c34d73d7b4cef988e9b44 Mon Sep 17 00:00:00 2001 From: J Stickler Date: Wed, 17 Apr 2024 15:41:37 -0400 Subject: [PATCH 8/8] docs: fix doc-validate errors due to config move (#12662) --- docs/sources/alert/_index.md | 2 +- docs/sources/configure/bp-configure.md | 2 +- docs/sources/get-started/components.md | 4 ++-- docs/sources/get-started/deployment-modes.md | 2 +- docs/sources/get-started/hash-rings.md | 2 +- docs/sources/operations/automatic-stream-sharding.md | 9 +++++++-- docs/sources/operations/blocking-queries.md | 4 ++-- docs/sources/operations/overrides-exporter.md | 2 +- docs/sources/operations/recording-rules.md | 6 +++--- docs/sources/operations/storage/boltdb-shipper.md | 6 +++--- docs/sources/operations/storage/schema/_index.md | 2 +- .../operations/storage/table-manager/_index.md | 10 +++++----- docs/sources/operations/storage/tsdb.md | 4 ++-- docs/sources/release-notes/v2-4.md | 4 ++-- docs/sources/release-notes/v2-5.md | 2 +- docs/sources/send-data/fluentbit/_index.md | 2 +- docs/sources/send-data/fluentd/_index.md | 2 +- docs/sources/send-data/lambda-promtail/_index.md | 2 +- docs/sources/send-data/otel/_index.md | 2 +- docs/sources/send-data/promtail/_index.md | 2 +- docs/sources/send-data/promtail/stages/limit.md | 2 +- .../send-data/promtail/troubleshooting/_index.md | 2 +- docs/sources/setup/migrate/migrate-to-tsdb/_index.md | 12 ++++++------ 23 files changed, 46 insertions(+), 41 deletions(-) diff --git a/docs/sources/alert/_index.md b/docs/sources/alert/_index.md index 47bf991a565c4..2d4b19477a90c 100644 --- a/docs/sources/alert/_index.md +++ b/docs/sources/alert/_index.md @@ -167,7 +167,7 @@ ruler: url: http://localhost:9090/api/v1/write ``` -Further configuration options can be found under [ruler]({{< relref "../configure#ruler" >}}). +Further configuration options can be found under [ruler](https://grafana.com/docs/loki//configure/#ruler). ### Operations diff --git a/docs/sources/configure/bp-configure.md b/docs/sources/configure/bp-configure.md index f5556ebba95e9..28feb68b1ba4c 100644 --- a/docs/sources/configure/bp-configure.md +++ b/docs/sources/configure/bp-configure.md @@ -14,7 +14,7 @@ Loki can cache data at many levels, which can drastically improve performance. D ## Time ordering of logs -Loki [accepts out-of-order writes]({{< relref "../configure#accept-out-of-order-writes" >}}) _by default_. +Loki [accepts out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes) _by default_. This section identifies best practices when Loki is _not_ configured to accept out-of-order writes. One issue many people have with Loki is their client receiving errors for out of order log entries. This happens because of this hard and fast rule within Loki: diff --git a/docs/sources/get-started/components.md b/docs/sources/get-started/components.md index 4852090f000a7..2ea5349d75105 100644 --- a/docs/sources/get-started/components.md +++ b/docs/sources/get-started/components.md @@ -57,7 +57,7 @@ Currently the only way the distributor mutates incoming data is by normalizing l The distributor can also rate limit incoming logs based on the maximum data ingest rate per tenant. It does this by checking a per-tenant limit and dividing it by the current number of distributors. This allows the rate limit to be specified per tenant at the cluster level and enables us to scale the distributors up or down and have the per-distributor limit adjust accordingly. 
For instance, say we have 10 distributors and tenant A has a 10MB rate limit. Each distributor will allow up to 1MB/s before limiting. Now, say another large tenant joins the cluster and we need to spin up 10 more distributors. The now 20 distributors will adjust their rate limits for tenant A to `(10MB / 20 distributors) = 500KB/s`. This is how global limits allow much simpler and safer operation of the Loki cluster. {{% admonition type="note" %}} -The distributor uses the `ring` component under the hood to register itself amongst its peers and get the total number of active distributors. This is a different "key" than the ingesters use in the ring and comes from the distributor's own [ring configuration]({{< relref "../configure#distributor" >}}). +The distributor uses the `ring` component under the hood to register itself amongst its peers and get the total number of active distributors. This is a different "key" than the ingesters use in the ring and comes from the distributor's own [ring configuration](https://grafana.com/docs/loki//configure/#distributor). {{% /admonition %}} ### Forwarding @@ -172,7 +172,7 @@ deduplicated. ### Timestamp Ordering -Loki is configured to [accept out-of-order writes]({{< relref "../configure#accept-out-of-order-writes" >}}) by default. +Loki is configured to [accept out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes) by default. When not configured to accept out-of-order writes, the ingester validates that ingested log lines are in order. When an ingester receives a log line that doesn't follow the expected order, the line diff --git a/docs/sources/get-started/deployment-modes.md b/docs/sources/get-started/deployment-modes.md index d41bfd4dc755c..80d48231a9c11 100644 --- a/docs/sources/get-started/deployment-modes.md +++ b/docs/sources/get-started/deployment-modes.md @@ -52,7 +52,7 @@ The simplest mode of operation is the monolithic deployment mode. You enable mon Monolithic mode is useful for getting started quickly to experiment with Loki, as well as for small read/write volumes of up to approximately 20GB per day. -You can horizontally scale a monolithic mode deployment to more instances by using a shared object store, and by configuring the [`ring` section]({{< relref "../configure#common" >}}) of the `loki.yaml` file to share state between all instances, but the recommendation is to use simple scalable mode if you need to scale your deployment. +You can horizontally scale a monolithic mode deployment to more instances by using a shared object store, and by configuring the [`ring` section](https://grafana.com/docs/loki//configure/#common) of the `loki.yaml` file to share state between all instances, but the recommendation is to use simple scalable mode if you need to scale your deployment. You can configure high availability by running two Loki instances using `memberlist_config` configuration and a shared object store and setting the `replication_factor` to `3`. You route traffic to all the Loki instances in a round robin fashion. diff --git a/docs/sources/get-started/hash-rings.md b/docs/sources/get-started/hash-rings.md index 702e101e7c04b..8bb024f4085fb 100644 --- a/docs/sources/get-started/hash-rings.md +++ b/docs/sources/get-started/hash-rings.md @@ -55,7 +55,7 @@ For each node, the key-value store holds: ## Configuring rings -Define [ring configuration]({{< relref "../configure#common" >}}) within the `common.ring_config` block. 
+Define [ring configuration](https://grafana.com/docs/loki//configure/#common) within the `common.ring_config` block. Use the default `memberlist` key-value store type unless there is a compelling reason to use a different key-value store type. diff --git a/docs/sources/operations/automatic-stream-sharding.md b/docs/sources/operations/automatic-stream-sharding.md index 1d2d36393c369..04a46fbfe0826 100644 --- a/docs/sources/operations/automatic-stream-sharding.md +++ b/docs/sources/operations/automatic-stream-sharding.md @@ -12,23 +12,28 @@ existing streams. When properly tuned, this should eliminate issues where log pr per-stream rate limit. **To enable automatic stream sharding:** -1. Edit the global [limits_config]({{< relref "../configure#limits_config" >}}) of the Loki configuration file: +1. Edit the global [`limits_config`](https://grafana.com/docs/loki//configure/#limits_config) of the Loki configuration file: + ```yaml limits_config: shard_streams: enabled: true ``` + 1. Optionally lower the `desired_rate` in bytes if you find that the system is still hitting the `per_stream_rate_limit`: + ```yaml limits_config: shard_streams: enabled: true desired_rate: 2097152 #2MiB ``` -1. Optionally enable `logging_enabled` for debugging stream sharding. + +1. Optionally enable `logging_enabled` for debugging stream sharding. {{% admonition type="note" %}} This may affect the ingestion performance of Loki. {{% /admonition %}} + ```yaml limits_config: shard_streams: diff --git a/docs/sources/operations/blocking-queries.md b/docs/sources/operations/blocking-queries.md index 3c3468215d96e..1a88342bd3f16 100644 --- a/docs/sources/operations/blocking-queries.md +++ b/docs/sources/operations/blocking-queries.md @@ -10,7 +10,7 @@ In certain situations, you may not be able to control the queries being sent to may be intentionally or unintentionally expensive to run, and they may affect the overall stability or cost of running your service. -You can block queries using [per-tenant overrides]({{< relref "../configure#runtime-configuration-file" >}}), like so: +You can block queries using [per-tenant overrides](https://grafana.com/docs/loki//configure/#runtime-configuration-file), like so: ```yaml overrides: @@ -36,7 +36,7 @@ overrides: types: filter,limited ``` {{% admonition type="note" %}} -Changes to these configurations **do not require a restart**; they are defined in the [runtime configuration file]({{< relref "../configure#runtime-configuration-file" >}}). +Changes to these configurations **do not require a restart**; they are defined in the [runtime configuration file](https://grafana.com/docs/loki//configure/#runtime-configuration-file). {{% /admonition %}} The available query types are: diff --git a/docs/sources/operations/overrides-exporter.md b/docs/sources/operations/overrides-exporter.md index 6a16d9dbbb040..ef645ca28efde 100644 --- a/docs/sources/operations/overrides-exporter.md +++ b/docs/sources/operations/overrides-exporter.md @@ -11,7 +11,7 @@ Loki is a multi-tenant system that supports applying limits to each tenant as a ## Context -Configuration updates to tenant limits can be applied to Loki without restart via the [`runtime_config`]({{< relref "../configure#runtime_config" >}}) feature. +Configuration updates to tenant limits can be applied to Loki without restart via the [`runtime_config`](https://grafana.com/docs/loki//configure/#runtime_config) feature. 
## Example diff --git a/docs/sources/operations/recording-rules.md b/docs/sources/operations/recording-rules.md index afac69b75e271..2254510daf7ee 100644 --- a/docs/sources/operations/recording-rules.md +++ b/docs/sources/operations/recording-rules.md @@ -9,7 +9,7 @@ weight: Recording rules are evaluated by the `ruler` component. Each `ruler` acts as its own `querier`, in the sense that it executes queries against the store without using the `query-frontend` or `querier` components. It will respect all query -[limits]({{< relref "../configure#limits_config" >}}) put in place for the `querier`. +[limits](https://grafana.com/docs/loki//configure/#limits_config) put in place for the `querier`. Loki's implementation of recording rules largely reuses Prometheus' code. @@ -77,8 +77,8 @@ so a `Persistent Volume` should be utilised. ### Per-Tenant Limits Remote-write can be configured at a global level in the base configuration, and certain parameters tuned specifically on -a per-tenant basis. Most of the configuration options [defined here]({{< relref "../configure#ruler" >}}) -have [override options]({{< relref "../configure#limits_config" >}}) (which can be also applied at runtime!). +a per-tenant basis. Most of the configuration options [defined here](https://grafana.com/docs/loki//configure/#ruler) +have [override options](https://grafana.com/docs/loki//configure/#limits_config) (which can be also applied at runtime!). ### Tuning diff --git a/docs/sources/operations/storage/boltdb-shipper.md b/docs/sources/operations/storage/boltdb-shipper.md index df32b95f3eedf..0b299806a0adc 100644 --- a/docs/sources/operations/storage/boltdb-shipper.md +++ b/docs/sources/operations/storage/boltdb-shipper.md @@ -117,14 +117,14 @@ Within Kubernetes, if you are not using an Index Gateway, we recommend running Q An Index Gateway downloads and synchronizes the BoltDB index from the Object Storage in order to serve index queries to the Queriers and Rulers over gRPC. This avoids running Queriers and Rulers with a disk for persistence. Disks can become costly in a big cluster. -To run an Index Gateway, configure [StorageConfig]({{< relref "../../configure#storage_config" >}}) and set the `-target` CLI flag to `index-gateway`. -To connect Queriers and Rulers to the Index Gateway, set the address (with gRPC port) of the Index Gateway with the `-boltdb.shipper.index-gateway-client.server-address` CLI flag or its equivalent YAML value under [StorageConfig]({{< relref "../../configure#storage_config" >}}). +To run an Index Gateway, configure [StorageConfig](https://grafana.com/docs/loki//configure/#storage_config) and set the `-target` CLI flag to `index-gateway`. +To connect Queriers and Rulers to the Index Gateway, set the address (with gRPC port) of the Index Gateway with the `-boltdb.shipper.index-gateway-client.server-address` CLI flag or its equivalent YAML value under [StorageConfig](https://grafana.com/docs/loki//configure/#storage_config). When using the Index Gateway within Kubernetes, we recommend using a StatefulSet with persistent storage for downloading and querying index files. This can obtain better read performance, avoids [noisy neighbor problems](https://en.wikipedia.org/wiki/Cloud_computing_issues#Performance_interference_and_noisy_neighbors) by not using the node disk, and avoids the time consuming index downloading step on startup after rescheduling to a new node. 
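For example, a minimal sketch of pointing Queriers and Rulers at an Index Gateway via YAML rather than the CLI flag (the address is a placeholder; confirm the exact key names against the `storage_config` reference):

```yaml
storage_config:
  boltdb_shipper:
    index_gateway_client:
      # gRPC address (host:port) of the Index Gateway; placeholder value
      server_address: index-gateway.loki.svc.cluster.local:9095
```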
### Write Deduplication disabled -Loki does write deduplication of chunks and index using Chunks and WriteDedupe cache respectively, configured with [ChunkStoreConfig]({{< relref "../../configure#chunk_store_config" >}}). +Loki does write deduplication of chunks and index using Chunks and WriteDedupe cache respectively, configured with [ChunkStoreConfig](https://grafana.com/docs/loki//configure/#chunk_store_config). The problem with write deduplication when using `boltdb-shipper` though is ingesters only keep uploading boltdb files periodically to make them available to all the other services which means there would be a brief period where some of the services would not have received updated index yet. The problem due to that is if an ingester which first wrote the chunks and index goes down and all the other ingesters which were part of replication scheme skipped writing those chunks and index due to deduplication, we would end up missing those logs from query responses since only the ingester which had the index went down. This problem would be faced even during rollouts which is quite common. diff --git a/docs/sources/operations/storage/schema/_index.md b/docs/sources/operations/storage/schema/_index.md index b7b89d64184e4..9cdca92cb09f2 100644 --- a/docs/sources/operations/storage/schema/_index.md +++ b/docs/sources/operations/storage/schema/_index.md @@ -33,7 +33,7 @@ schema_config: | Property | Description | |--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------| | from | for a new install, this must be a date in the past, use a recent date. Format is YYYY-MM-DD. | -| object_store | s3, azure, gcs, alibabacloud, bos, cos, swift, filesystem, or a named_store (see [StorageConfig]({{< relref "../../../configure#storage_config" >}})). | +| object_store | s3, azure, gcs, alibabacloud, bos, cos, swift, filesystem, or a named_store (see [StorageConfig](https://grafana.com/docs/loki//configure/#storage_config)). | | store | `tsdb` is the current and only recommended value for store. | | schema | `v13` is the most recent schema and recommended value. | | prefix: | any value without spaces is acceptable. | diff --git a/docs/sources/operations/storage/table-manager/_index.md b/docs/sources/operations/storage/table-manager/_index.md index 625123f0bb0ff..148d1fdf7043d 100644 --- a/docs/sources/operations/storage/table-manager/_index.md +++ b/docs/sources/operations/storage/table-manager/_index.md @@ -49,7 +49,7 @@ to store chunks, are not managed by the Table Manager, and a custom bucket polic should be set to delete old data. For detailed information on configuring the Table Manager, refer to the -[`table_manager`]({{< relref "../../../configure#table_manager" >}}) +[`table_manager`](https://grafana.com/docs/loki//configure/#table_manager) section in the Loki configuration document. @@ -58,10 +58,10 @@ section in the Loki configuration document. A periodic table stores the index or chunk data relative to a specific period of time. The duration of the time range of the data stored in a single table and its storage type is configured in the -[`schema_config`]({{< relref "../../../configure#schema_config" >}}) configuration +[`schema_config`](https://grafana.com/docs/loki//configure/#schema_config) configuration block. 
-The [`schema_config`]({{< relref "../../../configure#schema_config" >}}) can contain +The [`schema_config`](https://grafana.com/docs/loki//configure/#schema_config) can contain one or more `configs`. Each config, defines the storage used between the day set in `from` (in the format `yyyy-mm-dd`) and the next config, or "now" in the case of the last schema config entry. @@ -115,7 +115,7 @@ order to make sure that the new table is ready once the current table end period is reached. The `creation_grace_period` property - in the -[`table_manager`]({{< relref "../../../configure#table_manager" >}}) +[`table_manager`](https://grafana.com/docs/loki//configure/#table_manager) configuration block - defines how long before a table should be created. @@ -161,7 +161,7 @@ documentation. A table can be active or inactive. A table is considered **active** if the current time is within the range: -- Table start period - [`creation_grace_period`]({{< relref "../../../configure#table_manager" >}}) +- Table start period - [`creation_grace_period`](https://grafana.com/docs/loki//configure/#table_manager) - Table end period + max chunk age (hardcoded to `12h`) ![active_vs_inactive_tables](./table-manager-active-vs-inactive-tables.png) diff --git a/docs/sources/operations/storage/tsdb.md b/docs/sources/operations/storage/tsdb.md index d7c315bdf5e16..8f640f83f3bdd 100644 --- a/docs/sources/operations/storage/tsdb.md +++ b/docs/sources/operations/storage/tsdb.md @@ -73,7 +73,7 @@ We've added a user per-tenant limit called `tsdb_max_query_parallelism` in the ` ### Dynamic Query Sharding -Previously we would statically shard queries based on the index row shards configured [here]({{< relref "../../configure#period_config" >}}). +Previously we would statically shard queries based on the index row shards configured [here](https://grafana.com/docs/loki//configure/#period_config). TSDB does Dynamic Query Sharding based on how much data a query is going to be processing. We additionally store size(KB) and number of lines for each chunk in the TSDB index which is then used by the [Query Frontend]({{< relref "../../get-started/components#query-frontend" >}}) for planning the query. Based on our experience from operating many Loki clusters, we have configured TSDB to aim for processing 300-600 MBs of data per query shard. @@ -81,4 +81,4 @@ This means with TSDB we will be running more, smaller queries. ### Index Caching not required -TSDB is a compact and optimized format. Loki does not currently use an index cache for TSDB. If you are already using Loki with other index types, it is recommended to keep the index caching until all of your existing data falls out of [retention]({{< relref "./retention" >}}) or your configured `max_query_lookback` under [limits_config]({{< relref "../../configure#limits_config" >}}). After that, we suggest running without an index cache (it isn't used in TSDB). +TSDB is a compact and optimized format. Loki does not currently use an index cache for TSDB. If you are already using Loki with other index types, it is recommended to keep the index caching until all of your existing data falls out of [retention](https://grafana.com/docs/loki//operations/storage/retention/)) or your configured `max_query_lookback` under [limits_config](https://grafana.com/docs/loki//configure/#limits_config). After that, we suggest running without an index cache (it isn't used in TSDB). 
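As a rough sketch (the value is illustrative, not a recommendation), the per-tenant parallelism limit mentioned above is set under `limits_config` and can also be overridden per tenant through the runtime configuration file:

```yaml
limits_config:
  # maximum query parallelism for TSDB-backed periods (illustrative value)
  tsdb_max_query_parallelism: 512
```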
diff --git a/docs/sources/release-notes/v2-4.md b/docs/sources/release-notes/v2-4.md index 98c61a9d8c6eb..6465503e3df34 100644 --- a/docs/sources/release-notes/v2-4.md +++ b/docs/sources/release-notes/v2-4.md @@ -13,9 +13,9 @@ Loki 2.4 focuses on two items: ## Features and enhancements -* [**Loki no longer requires logs to be sent in perfect chronological order.**]({{< relref "../configure#accept-out-of-order-writes" >}}) Support for out of order logs is one of the most highly requested features for Loki. The strict ordering constraint has been removed. +* [**Loki no longer requires logs to be sent in perfect chronological order.**](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes) Support for out of order logs is one of the most highly requested features for Loki. The strict ordering constraint has been removed. * Scaling Loki is now easier with a hybrid deployment mode that falls between our single binary and our microservices. The [Simple scalable deployment]({{< relref "../get-started/deployment-modes" >}}) scales Loki with new `read` and `write` targets. Where previously you would have needed Kubernetes and the microservices approach to start tapping into Loki’s potential, it’s now possible to do this in a simpler way. -* The new [`common` section]({{< relref "../configure#common" >}}) results in a 70% smaller Loki configuration. Pair that with updated defaults and Loki comes out of the box with more appropriate defaults and limits. Check out the [example local configuration](https://github.com/grafana/loki/blob/main/cmd/loki/loki-local-config.yaml) as the new reference for running Loki. +* The new [`common` section](https://grafana.com/docs/loki//configure/#common) results in a 70% smaller Loki configuration. Pair that with updated defaults and Loki comes out of the box with more appropriate defaults and limits. Check out the [example local configuration](https://github.com/grafana/loki/blob/main/cmd/loki/loki-local-config.yaml) as the new reference for running Loki. * [**Recording rules**]({{< relref "../alert#recording-rules" >}}) are no longer an experimental feature. We've given them a more resilient implementation which leverages the existing write ahead log code in Prometheus. * The new [**Promtail Kafka Consumer**]({{< relref "../send-data/promtail/scraping#kafka" >}}) can easily get your logs out of Kafka and into Loki. * There are **nice LogQL enhancements**, thanks to the amazing Loki community. LogQL now has [group_left and group_right]({{< relref "../query#many-to-one-and-one-to-many-vector-matches" >}}). And, the `label_format` and `line_format` functions now support [working with dates and times]({{< relref "../query/template_functions#now" >}}). diff --git a/docs/sources/release-notes/v2-5.md b/docs/sources/release-notes/v2-5.md index ea7d015479a1c..9ffc947349a7d 100644 --- a/docs/sources/release-notes/v2-5.md +++ b/docs/sources/release-notes/v2-5.md @@ -14,7 +14,7 @@ It has been nearly 6 months since Loki 2.4 was released, and we’ve been busy m - **[Binary operations are now significantly faster](https://github.com/grafana/loki/pull/5317)**, taking full advantage of Loki's parallelism. - **[A new schema is available](https://github.com/grafana/loki/pull/5054)**, which uses more path prefixes to avoid rate limits on S3. - That same schema change **[was also added to the filesystem store](https://github.com/grafana/loki/pull/5291)**, which avoids using one directory to store every chunk. 
-- A new capability for **[hedging requests to storage](https://github.com/grafana/loki/pull/4826)** improves performance on highly parallelized queries. Refer to the [hedging configuration]({{< relref "../configure#storage_config" >}}) under the `storage_config` block for more information. +- A new capability for **[hedging requests to storage](https://github.com/grafana/loki/pull/4826)** improves performance on highly parallelized queries. Refer to the [hedging configuration](https://grafana.com/docs/loki//configure/#storage_config) under the `storage_config` block for more information. - Promtail has several new ways to ingest logs: - The **[ability to do service discovery and tailing directly from the Docker daemon](https://github.com/grafana/loki/pull/4911)**. - **[Fetching logs directly from Cloudflare](https://github.com/grafana/loki/pull/4813)**. diff --git a/docs/sources/send-data/fluentbit/_index.md b/docs/sources/send-data/fluentbit/_index.md index c35925b521d0e..b981445a19ace 100644 --- a/docs/sources/send-data/fluentbit/_index.md +++ b/docs/sources/send-data/fluentbit/_index.md @@ -229,7 +229,7 @@ Buffering refers to the ability to store the records somewhere, and while they a The blocking state with some of the input plugins is not acceptable, because it can have an undesirable side effect on the part that generates the logs. Fluent Bit implements a buffering mechanism that is based on parallel processing. Therefore, it cannot send logs in order. There are two ways of handling the out-of-order logs: -- Configure Loki to [accept out-of-order writes]({{< relref "../../configure#accept-out-of-order-writes" >}}). +- Configure Loki to [accept out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes). - Configure the Loki output plugin to use the buffering mechanism based on [`dque`](https://github.com/joncrlsn/dque), which is compatible with the Loki server strict time ordering: diff --git a/docs/sources/send-data/fluentd/_index.md b/docs/sources/send-data/fluentd/_index.md index 61195d04dda23..6adcca920bd44 100644 --- a/docs/sources/send-data/fluentd/_index.md +++ b/docs/sources/send-data/fluentd/_index.md @@ -154,7 +154,7 @@ Use with the `remove_keys kubernetes` option to eliminate metadata from the log. ### Multi-worker usage -Loki enables out-of-order inserts by default; refer to [accept out-of-order writes]({{< relref "../../configure#accept-out-of-order-writes" >}}). +Loki enables out-of-order inserts by default; refer to [accept out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes). If out-of-order inserts are _disabled_, attempting to insert a log entry with an earlier timestamp after a log entry with identical labels but a later timestamp, the insert will fail with `HTTP status code: 500, message: rpc error: code = Unknown desc = Entry out of order`. Therefore, in order to use this plugin in a multi worker Fluentd setup, you'll need to include the worker ID in the labels or otherwise [ensure log streams are always sent to the same worker](https://docs.fluentd.org/deployment/multi-process-workers#less-than-worker-n-greater-than-directive). 
For example, using [fluent-plugin-record-modifier](https://github.com/repeatedly/fluent-plugin-record-modifier): diff --git a/docs/sources/send-data/lambda-promtail/_index.md b/docs/sources/send-data/lambda-promtail/_index.md index f12af9b925bc7..49b1795a7c303 100644 --- a/docs/sources/send-data/lambda-promtail/_index.md +++ b/docs/sources/send-data/lambda-promtail/_index.md @@ -241,4 +241,4 @@ Instead we can pipeline Cloudwatch logs to a set of Promtails, which can mitigat 1) Using Promtail's push api along with the `use_incoming_timestamp: false` config, we let Promtail determine the timestamp based on when it ingests the logs, not the timestamp assigned by cloudwatch. Obviously, this means that we lose the origin timestamp because Promtail now assigns it, but this is a relatively small difference in a real time ingestion system like this. 2) In conjunction with (1), Promtail can coalesce logs across Cloudwatch log streams because it's no longer susceptible to out-of-order errors when combining multiple sources (lambda invocations). -One important aspect to keep in mind when running with a set of Promtails behind a load balancer is that we're effectively moving the cardinality problems from the number of log streams -> number of Promtails. If you have not configured Loki to [accept out-of-order writes]({{< relref "../../configure#accept-out-of-order-writes" >}}), you'll need to assign a Promtail-specific label on each Promtail so that you don't run into out-of-order errors when the Promtails send data for the same log groups to Loki. This can easily be done via a configuration like `--client.external-labels=promtail=${HOSTNAME}` passed to Promtail. +One important aspect to keep in mind when running with a set of Promtails behind a load balancer is that we're effectively moving the cardinality problems from the number of log streams -> number of Promtails. If you have not configured Loki to [accept out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes), you'll need to assign a Promtail-specific label on each Promtail so that you don't run into out-of-order errors when the Promtails send data for the same log groups to Loki. This can easily be done via a configuration like `--client.external-labels=promtail=${HOSTNAME}` passed to Promtail. diff --git a/docs/sources/send-data/otel/_index.md b/docs/sources/send-data/otel/_index.md index 9159eea9be6b3..d53b78a7a36ff 100644 --- a/docs/sources/send-data/otel/_index.md +++ b/docs/sources/send-data/otel/_index.md @@ -113,7 +113,7 @@ Things to note before ingesting OpenTelemetry logs to Loki: ### Changing the default mapping of OTLP to Loki Format -Loki supports [per tenant]({{< relref "../../configure#limits_config" >}}) OTLP config which lets you change the default mapping of OTLP to Loki format for each tenant. +Loki supports [per tenant](https://grafana.com/docs/loki//configure/#limits_config) OTLP config which lets you change the default mapping of OTLP to Loki format for each tenant. It currently only supports changing the storage of Attributes. Here is how the config looks like: ```yaml diff --git a/docs/sources/send-data/promtail/_index.md b/docs/sources/send-data/promtail/_index.md index dd12165face63..7e560e661438a 100644 --- a/docs/sources/send-data/promtail/_index.md +++ b/docs/sources/send-data/promtail/_index.md @@ -95,7 +95,7 @@ Important details are: to resume work from the last scraped line and process the rest of the remaining 55%. 
* Since decompression and pushing can be very fast, depending on the size of your compressed file Loki will rate-limit your ingestion. In that case you - might configure Promtail's [`limits` stage]({{< relref "./stages/limit" >}}) to slow the pace or increase [ingestion limits on Loki]({{< relref "../../configure#limits_config" >}}) + might configure Promtail's [`limits` stage](https://grafana.com/docs/loki//send-data/promtail/configuration/#limits_config) to slow the pace or increase [ingestion limits](https://grafana.com/docs/loki//configure/#limits_config) on Loki. * Log rotations on compressed files **are not supported as of now** (log rotation is fully supported for normal files), mostly because it requires us modifying Promtail to rely on file inodes instead of file names. If you'd like to see support for it, create a new diff --git a/docs/sources/send-data/promtail/stages/limit.md b/docs/sources/send-data/promtail/stages/limit.md index d70c7bc5a9989..e7a85f13bcd3a 100644 --- a/docs/sources/send-data/promtail/stages/limit.md +++ b/docs/sources/send-data/promtail/stages/limit.md @@ -14,7 +14,7 @@ The `limit` stage is a rate-limiting stage that throttles logs based on several ## Limit stage schema This pipeline stage places limits on the rate or burst quantity of log lines that Promtail pushes to Loki. -The concept of having distinct burst and rate limits mirrors the approach to limits that can be set for Loki's distributor component: `ingestion_rate_mb` and `ingestion_burst_size_mb`, as defined in [limits_config]({{< relref "../../../configure#limits_config" >}}). +The concept of having distinct burst and rate limits mirrors the approach to limits that can be set for Loki's distributor component: `ingestion_rate_mb` and `ingestion_burst_size_mb`, as defined in [limits_config](https://grafana.com/docs/loki//configure/#limits_config). ```yaml limit: diff --git a/docs/sources/send-data/promtail/troubleshooting/_index.md b/docs/sources/send-data/promtail/troubleshooting/_index.md index fb678d8b0bbe0..b06f0af4e2af7 100644 --- a/docs/sources/send-data/promtail/troubleshooting/_index.md +++ b/docs/sources/send-data/promtail/troubleshooting/_index.md @@ -201,7 +201,7 @@ from there. This means that if new log entries have been read and pushed to the ingester between the last sync period and the crash, these log entries will be sent again to the ingester on Promtail restart. -If Loki is not configured to [accept out-of-order writes]({{< relref "../../../configure#accept-out-of-order-writes" >}}), Loki will reject all log lines received in +If Loki is not configured to [accept out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes), Loki will reject all log lines received in what it perceives is out of order. If Promtail happens to crash, it may re-send log lines that were sent prior to the crash. The default diff --git a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md index e718f666a951e..963913e21ef9e 100644 --- a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md +++ b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md @@ -17,7 +17,7 @@ we strongly recommend migrating to TSDB. ### Configure TSDB index for an upcoming period -To begin the migration, add a new [period_config]({{< relref "../../../configure#period_config" >}}) entry in your [schema_config]({{< relref "../../../configure#schema_config" >}}). 
+To begin the migration, add a new [period_config](https://grafana.com/docs/loki//configure/#period_config) entry in your [schema_config](https://grafana.com/docs/loki//configure/#schema_config). You can read more about schema config [here](https://grafana.com/docs/loki//configure/storage/#schema-config). {{% admonition type="note" %}} @@ -45,17 +45,17 @@ schema_config: period: 24h ``` -① You must set the new period `from` to a date in the future. +1. You must set the new period `from` to a date in the future. -② Update the new period to use TSDB as the index type by setting `store: tsdb`. +1. Update the new period to use TSDB as the index type by setting `store: tsdb`. -③ This sample configuration uses filesystem as the storage in both the periods. If you want to use a different storage for the TSDB index and chunks, you can specify a different `object_store` in the new period. +1. This sample configuration uses filesystem as the storage in both the periods. If you want to use a different storage for the TSDB index and chunks, you can specify a different `object_store` in the new period. -④ Update the schema to v13 which is the recommended version at the time of writing. Please refer to the [configure page]({{< relref "../../../configure#period_config" >}}) for the current recommend version. +1. Update the schema to v13 which is the recommended version at the time of writing. Please refer to the [configure page](https://grafana.com/docs/loki//configure/#period_config) for the current recommend version. ### Configure TSDB shipper -It's also important that you configure the `tsdb_shipper` block in [storage_config]({{< relref "../../../configure#storage_config" >}}). Specifically the following options: +It's also important that you configure the `tsdb_shipper` block in [storage_config](https://grafana.com/docs/loki//configure/#storage_config). Specifically the following options: - `active_index_directory`: directory where ingesters would write index files which will then be uploaded by shipper to configured storage. - `cache_location`: cache location for downloading index files from the storage for use in query path.
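For example, a minimal sketch of the `tsdb_shipper` block covering the two options above (the paths are placeholders; adjust them to your deployment):

```yaml
storage_config:
  tsdb_shipper:
    # where ingesters write TSDB index files before the shipper uploads them (placeholder path)
    active_index_directory: /loki/tsdb-shipper-active
    # local cache for index files downloaded from object storage on the query path (placeholder path)
    cache_location: /loki/tsdb-shipper-cache
```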