From 94ab31413ffeee3523e1062ac707909e53eab919 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=A0imon=20Luka=C5=A1=C3=ADk?=
Date: Thu, 19 Oct 2023 20:21:00 +0200
Subject: [PATCH 01/33] Fix docs for fluentd client (#10639)

Fluentd client configuration has changed recently. The default config already
[reflects](https://github.com/grafana/loki/blob/main/clients/cmd/fluentd/docker/conf/loki.conf#L5-L8)
the new syntax. However, the examples on the page haven't been updated yet.
---
 docs/sources/send-data/fluentd/_index.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/sources/send-data/fluentd/_index.md b/docs/sources/send-data/fluentd/_index.md
index c15b0160f5e5d..bdf242e81b57e 100644
--- a/docs/sources/send-data/fluentd/_index.md
+++ b/docs/sources/send-data/fluentd/_index.md
@@ -80,8 +80,10 @@ In your Fluentd configuration, add `@type loki`. Additional configuration is opt
   username "#{ENV['LOKI_USERNAME']}"
   password "#{ENV['LOKI_PASSWORD']}"
   extra_labels {"env":"dev"}
-  flush_interval 10s
-  flush_at_shutdown true
+  <buffer>
+    flush_interval 10s
+    flush_at_shutdown true
+  </buffer>
   buffer_chunk_limit 1m
 ```

From fed9f14970664ff3aadfaa21721149f333f37526 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=A0imon=20Luka=C5=A1=C3=ADk?=
Date: Thu, 19 Oct 2023 21:10:30 +0200
Subject: [PATCH 02/33] clients: update fluentd base container (#10966)

1.14.0 is over 2 years old and the latest 1.16 continues to work just fine
with loki

Co-authored-by: Vladyslav Diachenko <82767850+vlad-diachenko@users.noreply.github.com>
---
 clients/cmd/fluentd/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clients/cmd/fluentd/Dockerfile b/clients/cmd/fluentd/Dockerfile
index caa411889feae..78138d8f02791 100644
--- a/clients/cmd/fluentd/Dockerfile
+++ b/clients/cmd/fluentd/Dockerfile
@@ -9,7 +9,7 @@ COPY . /src/loki
 WORKDIR /src/loki
 RUN make BUILD_IN_CONTAINER=false fluentd-plugin
 
-FROM fluent/fluentd:v1.14.0-debian-1.0
+FROM fluent/fluentd:v1.16-debian-1
 ENV LOKI_URL "https://logs-prod-us-central1.grafana.net"
 
 COPY --from=build /src/loki/clients/cmd/fluentd/lib/fluent/plugin/out_loki.rb /fluentd/plugins/out_loki.rb

From 54937784f989bdf3117596a61fd7d60bc9d4b05f Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Thu, 19 Oct 2023 15:10:51 -0400
Subject: [PATCH 03/33] Revert scalable monolithic to SSD (#10958)

After consultation with @stevendungan, reverting the terminology "Scalable
monolithic deployment" back to "Simple scalable deployment." Decided to add
a note about the confusion that SSD causes to clarify that we don't mean
solid state drives.

FYI @monodot

---------

Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
---
 docs/sources/get-started/deployment-modes.md | 22 +++++++++++--------
 docs/sources/operations/authentication.md | 2 +-
 .../install/helm/install-scalable/_index.md | 6 ++---
 docs/sources/setup/size/_index.md | 2 +-
 4 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/docs/sources/get-started/deployment-modes.md b/docs/sources/get-started/deployment-modes.md
index 330184681b1df..67e9c46294a74 100644
--- a/docs/sources/get-started/deployment-modes.md
+++ b/docs/sources/get-started/deployment-modes.md
@@ -8,23 +8,27 @@ aliases:
---
# Loki deployment modes

Loki is a distributed system consisting of many microservices.
It also has a unique build model where all of those microservices exist within the same binary.

You can configure the behavior of the single binary with the `-target` command-line flag to specify which microservices will run on startup. You can further configure each of the components in the `loki.yaml` file.

Because Loki decouples the data it stores from the software which ingests and queries it, you can easily redeploy a cluster under a different mode as your needs change, with minimal or no configuration changes.

## Simple Scalable

The simple scalable deployment mode is the preferred way to deploy Loki for most installations. The simple scalable deployment is the default configuration installed by the [Loki Helm Chart]({{< relref "../setup/install/helm" >}}). This deployment mode is the easiest way to deploy Loki at scale. It strikes a balance between deploying in [monolithic mode](#monolithic-mode) and deploying each component as a [separate microservice](#microservices-mode).

{{% admonition type="note" %}}
This deployment mode is sometimes referred to by the acronym SSD for simple scalable deployment, not to be confused with solid state drives. Loki uses an object store.
{{% /admonition %}}

Loki’s simple scalable deployment mode separates execution paths into read, write, and backend targets. These targets can be scaled independently, letting you customize your Loki deployment to meet your business needs for log ingestion and log query so that your infrastructure costs better match how you use Loki.

The simple scalable deployment mode can scale up to a few TBs of logs per day; however, if you go much beyond this, the microservices mode will be a better choice for most users.

![Simple scalable mode diagram](../scalable-monolithic-mode.png "Simple scalable mode")

The three execution paths in simple scalable mode are each activated by appending the following arguments to Loki on startup:

- `-target=write` - The write target is stateful and is controlled by a Kubernetes StatefulSet.
It contains the following components:
-- Distributor
@@ -38,7 +42,7 @@ The three execution paths in scalable monolithic mode are each activated by appe
-- Query scheduler
-- Ruler

-The scalable monolithic deployment mode requires a reverse proxy to be deployed in front of Loki, to direct client API requests to either the read or write nodes. The Loki Helm chart includes a default reverse proxy configuration, using Nginx.
+The simple scalable deployment mode requires a reverse proxy to be deployed in front of Loki, to direct client API requests to either the read or write nodes. The Loki Helm chart includes a default reverse proxy configuration, using Nginx.

## Monolithic mode

The simplest mode of operation is the monolithic deployment mode. You enable mon

Monolithic mode is useful for getting started quickly to experiment with Loki, as well as for small read/write volumes of up to approximately 20GB per day.

You can horizontally scale a monolithic mode deployment to more instances by using a shared object store, and by configuring the [`ring` section]({{< relref "../configure#common" >}}) of the `loki.yaml` file to share state between all instances, but the recommendation is to use simple scalable mode if you need to scale your deployment.

You can configure high availability by running two Loki instances using `memberlist_config` configuration and a shared object store and setting the `replication_factor` to `3`. You route traffic to all the Loki instances in a round robin fashion.

diff --git a/docs/sources/operations/authentication.md b/docs/sources/operations/authentication.md
index 44c1df1b1c7fc..065cd207a5811 100644
--- a/docs/sources/operations/authentication.md
+++ b/docs/sources/operations/authentication.md
@@ -9,7 +9,7 @@ weight: 

Grafana Loki does not come with any included authentication layer. Operators are expected to run an authenticating reverse proxy in front of your services.

-The scalable monolithic [deployment mode]({{< relref "../get-started/deployment-modes" >}}) requires a reverse proxy to be deployed in front of Loki, to direct client API requests to either the read or write nodes. The Loki Helm chart includes a default reverse proxy configuration, using Nginx.
+The simple scalable [deployment mode]({{< relref "../get-started/deployment-modes" >}}) requires a reverse proxy to be deployed in front of Loki, to direct client API requests to either the read or write nodes. The Loki Helm chart includes a default reverse proxy configuration, using Nginx.

A list of open-source reverse proxies you can use:

diff --git a/docs/sources/setup/install/helm/install-scalable/_index.md b/docs/sources/setup/install/helm/install-scalable/_index.md
index b22f41a39d2b7..3abd69fd8f752 100644
--- a/docs/sources/setup/install/helm/install-scalable/_index.md
+++ b/docs/sources/setup/install/helm/install-scalable/_index.md
@@ -1,5 +1,5 @@
---
-title: Install the scalable monolithic Helm chart
+title: Install the simple scalable Helm chart
menuTitle: Install scalable Loki
description: Install Loki in scalable mode.
aliases:
@@ -9,13 +9,13 @@ weight: 300
keywords:
---

-# Install the scalable monolithic Helm chart
+# Install the simple scalable Helm chart

This Helm Chart installation runs the Grafana Loki cluster within a Kubernetes cluster.

If object storage is configured, this chart configures Loki to run `read` and `write` targets in a [scalable mode]({{< relref "../../../../get-started/deployment-modes#simple-scalable" >}}), highly available architecture (3 replicas of each) designed to work with AWS S3 object storage. It will also configure meta-monitoring of metrics and logs.

It is not possible to run the scalable mode with the `filesystem` storage.

diff --git a/docs/sources/setup/size/_index.md b/docs/sources/setup/size/_index.md
index 7eb24046c8f96..7198db4e2eb4a 100644
--- a/docs/sources/setup/size/_index.md
+++ b/docs/sources/setup/size/_index.md
@@ -17,7 +17,7 @@ keywords: []
 
 This tool helps to generate a Helm Charts `values.yaml` file based on specified expected ingestion, retention rate and node type. It will always configure a
- [scalable]({{< relref "../../get-started/deployment-modes#scalable-monolithic-deployment-mode" >}}) deployment. The storage needs to be configured after generation.
+ [scalable]({{< relref "../../get-started/deployment-modes#simple-scalable" >}}) deployment. The storage needs to be configured after generation.
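To make that last step concrete, the fragment below sketches the kind of storage and replica configuration the generated `values.yaml` still needs. It is a minimal sketch, assuming the grafana/loki chart's `loki.storage` block and per-target `replicas` keys; the exact key names and the bucket names are illustrative and should be verified against the chart version you install.

```yaml
# Hypothetical values.yaml fragment for a simple scalable deployment.
# Key names are assumptions about the grafana/loki chart layout, not
# verbatim defaults; verify them against the chart's own values.yaml.
loki:
  storage:
    type: s3                # object storage is required; `filesystem` is not supported here
    bucketNames:
      chunks: loki-chunks   # illustrative bucket names
      ruler: loki-ruler
      admin: loki-admin
    s3:
      region: us-east-1
read:
  replicas: 3               # three replicas of each execution path, as described above
write:
  replicas: 3
backend:
  replicas: 3
```

Keeping the replica counts per target is what lets the read and write paths scale independently of each other.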
From 86455195f31185d44b328b08a2e7b4e0d3c9ae9a Mon Sep 17 00:00:00 2001
From: Kevin Burek
Date: Thu, 19 Oct 2023 16:27:38 -0400
Subject: [PATCH 04/33] Docs: LogQL `offset` modifier (#10960)

Bring back the offset modifier doc section, added in
https://github.com/grafana/loki/pull/3455 and lost in
https://github.com/grafana/loki/pull/4012

**What this PR does / why we need it**:

**Which issue(s) this PR fixes**:
Fixes regression from https://github.com/grafana/loki/issues/2785

**Special notes for your reviewer**:

**Checklist**
- [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**)
- [x] Documentation added
- [ ] Tests updated **N/A**
- [ ] `CHANGELOG.md` updated **N/A**
- [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label **N/A**
- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` **N/A**
- [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) **N/A**
---
 docs/sources/query/metric_queries.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/docs/sources/query/metric_queries.md b/docs/sources/query/metric_queries.md
index 5501d2539cd37..ca4fe38b09cae 100644
--- a/docs/sources/query/metric_queries.md
+++ b/docs/sources/query/metric_queries.md
@@ -57,6 +57,15 @@ Examples:
 sum by (host) (rate({job="mysql"} |= "error" != "timeout" | json | duration > 10s [1m]))
 ```
 
+#### Offset modifier
+The offset modifier allows changing the time offset for individual range vectors in a query.
+
+For example, the following expression counts all the logs for the MySQL job in the range from ten minutes ago to five minutes ago, rather than in the last five minutes. Note that the `offset` modifier must immediately follow the range vector selector.
+```logql
+count_over_time({job="mysql"}[5m] offset 5m) // GOOD
+count_over_time({job="mysql"}[5m]) offset 5m // INVALID
+```
+
 ### Unwrapped range aggregations
 
 Unwrapped ranges uses extracted labels as sample values instead of log lines. However to select which label will be used within the aggregation, the log query must end with an unwrap expression and optionally a label filter expression to discard [errors]({{< relref ".#pipeline-errors" >}}).

From 9474be0fef204cafabfd5186e6984909e63c8f37 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 20 Oct 2023 10:46:48 +0200
Subject: [PATCH 05/33] Update module github.com/hashicorp/consul to v1.14.5 [SECURITY] (main) (#10830)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)

This PR contains the following updates:

| Package | Type | Update | Change |
|---|---|---|---|
| [github.com/hashicorp/consul](https://togithub.com/hashicorp/consul) | replace | minor | `v1.5.1` -> `v1.14.5` |

---

### Denial of Service (DoS) in HashiCorp Consul
[CVE-2020-7219](https://nvd.nist.gov/vuln/detail/CVE-2020-7219) / [GHSA-23jv-v6qj-3fhh](https://togithub.com/advisories/GHSA-23jv-v6qj-3fhh)
#### Details
HashiCorp Consul and Consul Enterprise up to 1.6.2 HTTP/RPC services allowed unbounded resource usage, and were susceptible to unauthenticated denial of service. Fixed in 1.6.3.

##### Specific Go Packages Affected
github.com/hashicorp/consul/agent/consul

#### Severity
- CVSS Score: 7.5 / 10 (High)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2020-7219](https://nvd.nist.gov/vuln/detail/CVE-2020-7219)
- [https://github.com/hashicorp/consul/issues/7159](https://togithub.com/hashicorp/consul/issues/7159)
- [https://www.hashicorp.com/blog/category/consul/](https://www.hashicorp.com/blog/category/consul/)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-23jv-v6qj-3fhh) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### Incorrect Authorization in HashiCorp Consul [CVE-2020-7955](https://nvd.nist.gov/vuln/detail/CVE-2020-7955) / [GHSA-r9w6-rhh9-7v53](https://togithub.com/advisories/GHSA-r9w6-rhh9-7v53)
#### Details
HashiCorp Consul and Consul Enterprise 1.4.1 through 1.6.2 did not uniformly enforce ACLs across all API endpoints, resulting in potential unintended information disclosure. Fixed in 1.6.3.

#### Severity
- CVSS Score: 5.3 / 10 (Medium)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2020-7955](https://nvd.nist.gov/vuln/detail/CVE-2020-7955)
- [https://github.com/hashicorp/consul/issues/7160](https://togithub.com/hashicorp/consul/issues/7160)
- [https://www.hashicorp.com/blog/category/consul/](https://www.hashicorp.com/blog/category/consul/)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-r9w6-rhh9-7v53) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### Allocation of Resources Without Limits or Throttling in Hashicorp Consul [CVE-2020-13250](https://nvd.nist.gov/vuln/detail/CVE-2020-13250) / [GHSA-rqjq-mrgx-85hp](https://togithub.com/advisories/GHSA-rqjq-mrgx-85hp)
#### Details
HashiCorp Consul and Consul Enterprise include an HTTP API (introduced in 1.2.0) and DNS (introduced in 1.4.3) caching feature that was vulnerable to denial of service.

##### Specific Go Packages Affected
github.com/hashicorp/consul/agent/config

##### Fix
The vulnerability is fixed in versions 1.6.6 and 1.7.4.

#### Severity
- CVSS Score: 7.5 / 10 (High)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2020-13250](https://nvd.nist.gov/vuln/detail/CVE-2020-13250)
- [https://github.com/hashicorp/consul/pull/8023](https://togithub.com/hashicorp/consul/pull/8023)
- [https://github.com/hashicorp/consul/commit/72f92ae7ca4cabc1dc3069362a9b64ef46941432](https://togithub.com/hashicorp/consul/commit/72f92ae7ca4cabc1dc3069362a9b64ef46941432)
- [https://github.com/hashicorp/consul/blob/v1.6.6/CHANGELOG.md](https://togithub.com/hashicorp/consul/blob/v1.6.6/CHANGELOG.md)
- [https://github.com/hashicorp/consul/blob/v1.7.4/CHANGELOG.md](https://togithub.com/hashicorp/consul/blob/v1.7.4/CHANGELOG.md)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-rqjq-mrgx-85hp) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### HashiCorp Consul Cross-site Scripting vulnerability [CVE-2020-25864](https://nvd.nist.gov/vuln/detail/CVE-2020-25864) / [GHSA-8xmx-h8rq-h94j](https://togithub.com/advisories/GHSA-8xmx-h8rq-h94j)
#### Details
HashiCorp Consul and Consul Enterprise up to version 1.9.4 key-value (KV) raw mode was vulnerable to cross-site scripting. Fixed in 1.9.5, 1.8.10 and 1.7.14.

#### Severity
- CVSS Score: 6.1 / 10 (Medium)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2020-25864](https://nvd.nist.gov/vuln/detail/CVE-2020-25864)
- [https://discuss.hashicorp.com/t/hcsec-2021-07-consul-api-kv-endpoint-vulnerable-to-cross-site-scripting/23368](https://discuss.hashicorp.com/t/hcsec-2021-07-consul-api-kv-endpoint-vulnerable-to-cross-site-scripting/23368)
- [https://github.com/hashicorp/consul](https://togithub.com/hashicorp/consul)
- [https://security.gentoo.org/glsa/202208-09](https://security.gentoo.org/glsa/202208-09)
- [https://www.hashicorp.com/blog/category/consul](https://www.hashicorp.com/blog/category/consul)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-8xmx-h8rq-h94j) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### HashiCorp Consul Privilege Escalation Vulnerability [CVE-2021-37219](https://nvd.nist.gov/vuln/detail/CVE-2021-37219) / [GHSA-ccw8-7688-vqx4](https://togithub.com/advisories/GHSA-ccw8-7688-vqx4)
#### Details
HashiCorp Consul and Consul Enterprise 1.10.1 Raft RPC layer allows non-server agents with a valid certificate signed by the same CA to access server-only functionality, enabling privilege escalation. Fixed in 1.8.15, 1.9.9 and 1.10.2.

#### Severity
- CVSS Score: 8.8 / 10 (High)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2021-37219](https://nvd.nist.gov/vuln/detail/CVE-2021-37219)
- [https://github.com/hashicorp/consul/pull/10925](https://togithub.com/hashicorp/consul/pull/10925)
- [https://github.com/hashicorp/consul/commit/3357e57dac9aadabd476f7a14973e47f003c4cf0](https://togithub.com/hashicorp/consul/commit/3357e57dac9aadabd476f7a14973e47f003c4cf0)
- [https://github.com/hashicorp/consul/commit/473edd1764b6739e2e4610ea5dede4c2bc6009d1](https://togithub.com/hashicorp/consul/commit/473edd1764b6739e2e4610ea5dede4c2bc6009d1)
- [https://github.com/hashicorp/consul/commit/ccf8eb1947357434eb6e66303ddab79f4c9d4103](https://togithub.com/hashicorp/consul/commit/ccf8eb1947357434eb6e66303ddab79f4c9d4103)
- [https://discuss.hashicorp.com/t/hcsec-2021-22-consul-raft-rpc-privilege-escalation/29024](https://discuss.hashicorp.com/t/hcsec-2021-22-consul-raft-rpc-privilege-escalation/29024)
- [https://github.com/hashicorp/consul](https://togithub.com/hashicorp/consul)
- [https://security.gentoo.org/glsa/202207-01](https://security.gentoo.org/glsa/202207-01)
- [https://www.hashicorp.com/blog/category/consul](https://www.hashicorp.com/blog/category/consul)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-ccw8-7688-vqx4) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### HashiCorp Consul and Consul Enterprise 1.10.1 Txn.Apply endpoint allowed services to register proxies for other services, enabling access to service traffic. [CVE-2021-38698](https://nvd.nist.gov/vuln/detail/CVE-2021-38698) / [GHSA-6hw5-6gcx-phmw](https://togithub.com/advisories/GHSA-6hw5-6gcx-phmw)
#### Details
HashiCorp Consul and Consul Enterprise 1.10.1 Txn.Apply endpoint allowed services to register proxies for other services, enabling access to service traffic. Fixed in 1.8.15, 1.9.9 and 1.10.2.

#### Severity
- CVSS Score: 6.5 / 10 (Medium)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:N/A:N`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2021-38698](https://nvd.nist.gov/vuln/detail/CVE-2021-38698)
- [https://github.com/hashicorp/consul/pull/10824](https://togithub.com/hashicorp/consul/pull/10824)
- [https://discuss.hashicorp.com/t/hcsec-2021-24-consul-missing-authorization-check-on-txn-apply-endpoint/29026](https://discuss.hashicorp.com/t/hcsec-2021-24-consul-missing-authorization-check-on-txn-apply-endpoint/29026)
- [https://github.com/hashicorp/consul](https://togithub.com/hashicorp/consul)
- [https://security.gentoo.org/glsa/202208-09](https://security.gentoo.org/glsa/202208-09)
- [https://www.hashicorp.com/blog/category/consul](https://www.hashicorp.com/blog/category/consul)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-6hw5-6gcx-phmw) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### Hashicorp Consul HTTP health check endpoints returning an HTTP redirect may be abused as SSRF vector [CVE-2022-29153](https://nvd.nist.gov/vuln/detail/CVE-2022-29153) / [GHSA-q6h7-4qgw-2j9p](https://togithub.com/advisories/GHSA-q6h7-4qgw-2j9p)
#### Details
A vulnerability was identified in Consul and Consul Enterprise (“Consul”) such that HTTP health check endpoints returning an HTTP redirect may be abused as a vector for server-side request forgery (SSRF). This vulnerability, CVE-2022-29153, was fixed in Consul 1.9.17, 1.10.10, and 1.11.5.

#### Severity
- CVSS Score: 7.5 / 10 (High)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2022-29153](https://nvd.nist.gov/vuln/detail/CVE-2022-29153)
- [https://discuss.hashicorp.com](https://discuss.hashicorp.com)
- [https://discuss.hashicorp.com/t/hcsec-2022-10-consul-s-http-health-check-may-allow-server-side-request-forgery/](https://discuss.hashicorp.com/t/hcsec-2022-10-consul-s-http-health-check-may-allow-server-side-request-forgery/)
- [https://discuss.hashicorp.com/t/hcsec-2022-10-consul-s-http-health-check-may-allow-server-side-request-forgery/38393](https://discuss.hashicorp.com/t/hcsec-2022-10-consul-s-http-health-check-may-allow-server-side-request-forgery/38393)
- [https://github.com/hashicorp/consul](https://togithub.com/hashicorp/consul)
- [https://lists.fedoraproject.org/archives/list/package-announce%40lists.fedoraproject.org/message/RBODKZL7HQE5XXS3SA2VIDVL4LAA5RWH/](https://lists.fedoraproject.org/archives/list/package-announce%40lists.fedoraproject.org/message/RBODKZL7HQE5XXS3SA2VIDVL4LAA5RWH/)
- [https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/RBODKZL7HQE5XXS3SA2VIDVL4LAA5RWH/](https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/RBODKZL7HQE5XXS3SA2VIDVL4LAA5RWH/)
- [https://security.gentoo.org/glsa/202208-09](https://security.gentoo.org/glsa/202208-09)
- [https://security.netapp.com/advisory/ntap-20220602-0005/](https://security.netapp.com/advisory/ntap-20220602-0005/)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-q6h7-4qgw-2j9p) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### Hashicorp Consul Missing SSL Certificate Validation [CVE-2021-32574](https://nvd.nist.gov/vuln/detail/CVE-2021-32574) / [GHSA-25gf-8qrr-g78r](https://togithub.com/advisories/GHSA-25gf-8qrr-g78r)
#### Details
HashiCorp Consul before 1.10.1 (and Consul Enterprise) has Missing SSL Certificate Validation. xds does not ensure that the Subject Alternative Name of an upstream is validated.

#### Severity
- CVSS Score: 7.5 / 10 (High)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2021-32574](https://nvd.nist.gov/vuln/detail/CVE-2021-32574)
- [https://discuss.hashicorp.com/t/hcsec-2021-17-consul-s-envoy-tls-configuration-did-not-validate-destination-service-subject-alternative-names/26856](https://discuss.hashicorp.com/t/hcsec-2021-17-consul-s-envoy-tls-configuration-did-not-validate-destination-service-subject-alternative-names/26856)
- [https://github.com/hashicorp/consul/releases/tag/v1.10.1](https://togithub.com/hashicorp/consul/releases/tag/v1.10.1)
- [https://security.gentoo.org/glsa/202208-09](https://security.gentoo.org/glsa/202208-09)
- [https://www.hashicorp.com/blog/category/consul](https://www.hashicorp.com/blog/category/consul)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-25gf-8qrr-g78r) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### HashiCorp Consul L7 deny intention results in an allow action [CVE-2021-36213](https://nvd.nist.gov/vuln/detail/CVE-2021-36213) / [GHSA-8h2g-r292-j8xh](https://togithub.com/advisories/GHSA-8h2g-r292-j8xh)
#### Details
In HashiCorp Consul before 1.10.1 (and Consul Enterprise), xds can generate a situation where a single L7 deny intention (with a default deny policy) results in an allow action.

#### Severity
- CVSS Score: 7.5 / 10 (High)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2021-36213](https://nvd.nist.gov/vuln/detail/CVE-2021-36213)
- [https://discuss.hashicorp.com/t/hcsec-2021-16-consul-s-application-aware-intentions-deny-action-fails-open-when-combined-with-default-deny-policy/26855](https://discuss.hashicorp.com/t/hcsec-2021-16-consul-s-application-aware-intentions-deny-action-fails-open-when-combined-with-default-deny-policy/26855)
- [https://github.com/hashicorp/consul/](https://togithub.com/hashicorp/consul/)
- [https://github.com/hashicorp/consul/releases/tag/v1.10.1](https://togithub.com/hashicorp/consul/releases/tag/v1.10.1)
- [https://security.gentoo.org/glsa/202208-09](https://security.gentoo.org/glsa/202208-09)
- [https://www.hashicorp.com/blog/category/consul](https://www.hashicorp.com/blog/category/consul)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-8h2g-r292-j8xh) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### HashiCorp Consul vulnerable to authorization bypass [CVE-2022-40716](https://nvd.nist.gov/vuln/detail/CVE-2022-40716) / [GHSA-m69r-9g56-7mv8](https://togithub.com/advisories/GHSA-m69r-9g56-7mv8)
#### Details
HashiCorp Consul and Consul Enterprise versions prior to 1.11.9, 1.12.5, and 1.13.2 do not check for multiple SAN URI values in a CSR on the internal RPC endpoint, enabling leverage of privileged access to bypass service mesh intentions. A specially crafted CSR sent directly to Consul’s internal server agent RPC endpoint can include multiple SAN URI values with additional service names. This issue has been fixed in versions 1.11.9, 1.12.5, and 1.13.2. There are no known workarounds.

#### Severity
- CVSS Score: 6.5 / 10 (Medium)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:H/A:N`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2022-40716](https://nvd.nist.gov/vuln/detail/CVE-2022-40716)
- [https://github.com/hashicorp/consul/pull/14579](https://togithub.com/hashicorp/consul/pull/14579)
- [https://github.com/hashicorp/consul/commit/8f6fb4f6fe9488b8ec37da71ac503081d7d3760b](https://togithub.com/hashicorp/consul/commit/8f6fb4f6fe9488b8ec37da71ac503081d7d3760b)
- [https://discuss.hashicorp.com](https://discuss.hashicorp.com)
- [https://discuss.hashicorp.com/t/hcsec-2022-20-consul-service-mesh-intention-bypass-with-malicious-certificate-signing-request/44628](https://discuss.hashicorp.com/t/hcsec-2022-20-consul-service-mesh-intention-bypass-with-malicious-certificate-signing-request/44628)
- [https://github.com/hashicorp/consul](https://togithub.com/hashicorp/consul)
- [https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/LYZOKMMVX4SIEHPJW3SJUQGMO5YZCPHC/](https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/LYZOKMMVX4SIEHPJW3SJUQGMO5YZCPHC/)
- [https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/ZTE4ITXXPIWZEQ4HYQCB6N6GZIMWXDAI/](https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/ZTE4ITXXPIWZEQ4HYQCB6N6GZIMWXDAI/)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-m69r-9g56-7mv8) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
--- ### Hashicorp Consul vulnerable to denial of service [CVE-2023-1297](https://nvd.nist.gov/vuln/detail/CVE-2023-1297) / [GHSA-c57c-7hrj-6q6v](https://togithub.com/advisories/GHSA-c57c-7hrj-6q6v)
#### Details
Consul and Consul Enterprise's cluster peering implementation contained a flaw whereby a peer cluster with service of the same name as a local service could corrupt Consul state, resulting in denial of service. This vulnerability was resolved in Consul 1.14.5, and 1.15.3

#### Severity
- CVSS Score: 4.9 / 10 (Medium)
- Vector String: `CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:U/C:N/I:N/A:H`

#### References
- [https://nvd.nist.gov/vuln/detail/CVE-2023-1297](https://nvd.nist.gov/vuln/detail/CVE-2023-1297)
- [https://discuss.hashicorp.com/t/hcsec-2023-15-consul-cluster-peering-can-result-in-denial-of-service/54515](https://discuss.hashicorp.com/t/hcsec-2023-15-consul-cluster-peering-can-result-in-denial-of-service/54515)
- [https://github.com/hashicorp/consul](https://togithub.com/hashicorp/consul)

This data is provided by [OSV](https://osv.dev/vulnerability/GHSA-c57c-7hrj-6q6v) and the [GitHub Advisory Database](https://togithub.com/github/advisory-database) ([CC-BY 4.0](https://togithub.com/github/advisory-database/blob/main/LICENSE.md)).
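All of the advisories above are resolved at or before Consul 1.14.5, which is why the update table at the top of this PR bumps the `replace` pin. As a rough sketch, the corresponding `go.mod` change looks like the following; the `module` and `go` directives are illustrative placeholders, and the canonical module path is written as `github.com/hashicorp/consul` even though links in this PR body are rewritten through other domains.

```go
// go.mod (sketch): a renovate "replace" update only moves the pinned
// version of an existing replace directive.
module github.com/grafana/loki

go 1.20 // illustrative

// Before this PR: ... => github.com/hashicorp/consul v1.5.1
replace github.com/hashicorp/consul => github.com/hashicorp/consul v1.14.5
```

After the bump, running `go mod tidy` and then `go list -m github.com/hashicorp/consul` should report v1.14.5.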
--- ### Release Notes
hashicorp/consul (github.com/hashicorp/consul) ### [`v1.14.5`](https://togithub.com/hashicorp/consul/releases/tag/v1.14.5) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.14.4...v1.14.5) #### 1.14.5 (March 7, 2023) SECURITY: - Upgrade to use Go 1.20.1. This resolves vulnerabilities [CVE-2022-41724](https://go.dev/issue/58001) in `crypto/tls` and [CVE-2022-41723](https://go.dev/issue/57855) in `net/http`. \[[GH-16263](https://togithub.com/hashicorp/consul/issues/16263)] IMPROVEMENTS: - container: Upgrade container image to use to Alpine 3.17. \[[GH-16358](https://togithub.com/hashicorp/consul/issues/16358)] - mesh: Add ServiceResolver RequestTimeout for route timeouts to make request timeouts configurable \[[GH-16495](https://togithub.com/hashicorp/consul/issues/16495)] BUG FIXES: - mesh: Fix resolution of service resolvers with subsets for external upstreams \[[GH-16499](https://togithub.com/hashicorp/consul/issues/16499)] - peering: Fix bug where services were incorrectly imported as connect-enabled. \[[GH-16339](https://togithub.com/hashicorp/consul/issues/16339)] - peering: Fix issue where mesh gateways would use the wrong address when contacting a remote peer with the same datacenter name. \[[GH-16257](https://togithub.com/hashicorp/consul/issues/16257)] - peering: Fix issue where secondary wan-federated datacenters could not be used as peering acceptors. \[[GH-16230](https://togithub.com/hashicorp/consul/issues/16230)] - proxycfg: fix a bug where terminating gateways were not cleaning up deleted service resolvers for their referenced services \[[GH-16498](https://togithub.com/hashicorp/consul/issues/16498)] ### [`v1.14.4`](https://togithub.com/hashicorp/consul/releases/tag/v1.14.4) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.14.3...v1.14.4) #### 1.14.4 (January 26, 2023) BREAKING CHANGES: - connect: Fix configuration merging for transparent proxy upstreams. Proxy-defaults and service-defaults config entries were not correctly merged for implicit upstreams in transparent proxy mode and would result in some configuration not being applied. To avoid issues when upgrading, ensure that any proxy-defaults or service-defaults have correct configuration for upstreams, since all fields will now be properly used to configure proxies. \[[GH-16000](https://togithub.com/hashicorp/consul/issues/16000)] - peering: Newly created peering connections must use only lowercase characters in the `name` field. Existing peerings with uppercase characters will not be modified, but they may encounter issues in various circumstances. To maintain forward compatibility and avoid issues, it is recommended to destroy and re-create any invalid peering connections so that they do not have a name containing uppercase characters. \[[GH-15697](https://togithub.com/hashicorp/consul/issues/15697)] FEATURES: - connect: add flags `envoy-ready-bind-port` and `envoy-ready-bind-address` to the `consul connect envoy` command that allows configuration of readiness probe on proxy for any service kind. \[[GH-16015](https://togithub.com/hashicorp/consul/issues/16015)] - deps: update to latest go-discover to provide ECS auto-discover capabilities. 
\[[GH-13782](https://togithub.com/hashicorp/consul/issues/13782)] IMPROVEMENTS: - acl: relax permissions on the `WatchServers`, `WatchRoots` and `GetSupportedDataplaneFeatures` gRPC endpoints to accept *any* valid ACL token \[[GH-15346](https://togithub.com/hashicorp/consul/issues/15346)] - connect: Add support for ConsulResolver to specifies a filter expression \[[GH-15659](https://togithub.com/hashicorp/consul/issues/15659)] - grpc: Use new balancer implementation to reduce periodic WARN logs when shuffling servers. \[[GH-15701](https://togithub.com/hashicorp/consul/issues/15701)] - partition: **(Consul Enterprise only)** when loading service from on-disk config file or sending API request to agent endpoint, if the partition is unspecified, consul will default the partition in the request to agent's partition \[[GH-16024](https://togithub.com/hashicorp/consul/issues/16024)] BUG FIXES: - agent: Fix assignment of error when auto-reloading cert and key file changes. \[[GH-15769](https://togithub.com/hashicorp/consul/issues/15769)] - agent: Fix issue where the agent cache would incorrectly mark protobuf objects as updated. \[[GH-15866](https://togithub.com/hashicorp/consul/issues/15866)] - cli: Fix issue where `consul connect envoy` was unable to configure TLS over unix-sockets to gRPC. \[[GH-15913](https://togithub.com/hashicorp/consul/issues/15913)] - connect: **(Consul Enterprise only)** Fix issue where upstream configuration from proxy-defaults and service-defaults was not properly merged. This could occur when a mixture of empty-strings and "default" were used for the namespace or partition fields. - connect: Fix issue where service-resolver protocol checks incorrectly errored for failover peer targets. \[[GH-15833](https://togithub.com/hashicorp/consul/issues/15833)] - connect: Fix issue where watches on upstream failover peer targets did not always query the correct data. \[[GH-15865](https://togithub.com/hashicorp/consul/issues/15865)] - xds: fix bug where sessions for locally-managed services could fail with "this server has too many xDS streams open" \[[GH-15789](https://togithub.com/hashicorp/consul/issues/15789)] ### [`v1.14.3`](https://togithub.com/hashicorp/consul/releases/tag/v1.14.3) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.14.2...v1.14.3) #### 1.14.3 (December 13, 2022) SECURITY: - Upgrade to use Go 1.19.4. This resolves a vulnerability where restricted files can be read on Windows. [CVE-2022-41720](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41720) \[[GH-15705](https://togithub.com/hashicorp/consul/issues/15705)] - Upgrades `golang.org/x/net` to prevent a denial of service by excessive memory usage caused by HTTP2 requests. [CVE-2022-41717](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41717) \[[GH-15737](https://togithub.com/hashicorp/consul/issues/15737)] FEATURES: - ui: Add field for fallback server addresses to peer token generation form \[[GH-15555](https://togithub.com/hashicorp/consul/issues/15555)] IMPROVEMENTS: - connect: ensure all vault connect CA tests use limited privilege tokens \[[GH-15669](https://togithub.com/hashicorp/consul/issues/15669)] BUG FIXES: - agent: **(Enterprise Only)** Ensure configIntentionsConvertToList does not compare empty strings with populated strings when filtering intentions created prior to AdminPartitions. - connect: Fix issue where DialedDirectly configuration was not used by Consul Dataplane. 
\[[GH-15760](https://togithub.com/hashicorp/consul/issues/15760)] - connect: Fix peering failovers ignoring local mesh gateway configuration. \[[GH-15690](https://togithub.com/hashicorp/consul/issues/15690)] - connect: Fixed issue where using Vault 1.11+ as CA provider in a secondary datacenter would eventually break Intermediate CAs \[[GH-15661](https://togithub.com/hashicorp/consul/issues/15661)] ### [`v1.14.2`](https://togithub.com/hashicorp/consul/releases/tag/v1.14.2) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.14.1...v1.14.2) #### 1.14.2 (November 30, 2022) FEATURES: - connect: Add local_idle_timeout_ms to allow configuring the Envoy route idle timeout on local_app connect: Add IdleTimeout to service-router to allow configuring the Envoy route idle timeout \[[GH-14340](https://togithub.com/hashicorp/consul/issues/14340)] - snapshot: **(Enterprise Only)** Add support for the snapshot agent to use an IAM role for authentication/authorization when managing snapshots in S3. IMPROVEMENTS: - dns: Add support for cluster peering `.service` and `.node` DNS queries. \[[GH-15596](https://togithub.com/hashicorp/consul/issues/15596)] BUG FIXES: - acl: avoid debug log spam in secondary datacenter servers due to management token not being initialized. \[[GH-15610](https://togithub.com/hashicorp/consul/issues/15610)] - agent: Fixed issue where blocking queries with short waits could timeout on the client \[[GH-15541](https://togithub.com/hashicorp/consul/issues/15541)] - ca: Fixed issue where using Vault as Connect CA with Vault-managed policies would error on start-up if the intermediate PKI mount existed but was empty \[[GH-15525](https://togithub.com/hashicorp/consul/issues/15525)] - cli: **(Enterprise Only)** Fix issue where `consul partition update` subcommand was not registered and therefore not available through the cli. - connect: Fixed issue where using Vault 1.11+ as CA provider would eventually break Intermediate CAs \[[GH-15217](https://togithub.com/hashicorp/consul/issues/15217)] \[[GH-15253](https://togithub.com/hashicorp/consul/issues/15253)] - namespace: **(Enterprise Only)** Fix a bug that caused blocking queries during namespace replication to timeout - peering: better represent non-passing states during peer check flattening \[[GH-15615](https://togithub.com/hashicorp/consul/issues/15615)] - peering: fix the limit of replication gRPC message; set to 8MB \[[GH-15503](https://togithub.com/hashicorp/consul/issues/15503)] ### [`v1.14.1`](https://togithub.com/hashicorp/consul/releases/tag/v1.14.1) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.14.0...v1.14.1) #### 1.14.1 (November 21, 2022) BUG FIXES: - cli: Fix issue where `consul connect envoy` incorrectly uses the HTTPS API configuration for xDS connections. \[[GH-15466](https://togithub.com/hashicorp/consul/issues/15466)] - sdk: Fix SDK testutil backwards compatibility by only configuring grpc_tls port for new Consul versions. \[[GH-15423](https://togithub.com/hashicorp/consul/issues/15423)] ### [`v1.14.0`](https://togithub.com/hashicorp/consul/releases/tag/v1.14.0) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.9...v1.14.0) #### 1.14.0 (November 15, 2022) BREAKING CHANGES: - config: Add new `ports.grpc_tls` configuration option. Introduce a new port to better separate TLS config from the existing `ports.grpc` config. The new `ports.grpc_tls` only supports TLS encrypted communication. The existing `ports.grpc` now only supports plain-text communication. 
\[[GH-15339](https://togithub.com/hashicorp/consul/issues/15339)] - config: update 1.14 config defaults: Enable `peering` and `connect` by default. \[[GH-15302](https://togithub.com/hashicorp/consul/issues/15302)] - config: update 1.14 config defaults: Set gRPC TLS port default value to 8503 \[[GH-15302](https://togithub.com/hashicorp/consul/issues/15302)] - connect: Removes support for Envoy 1.20 \[[GH-15093](https://togithub.com/hashicorp/consul/issues/15093)] - peering: Rename `PeerName` to `Peer` on prepared queries and exported services. \[[GH-14854](https://togithub.com/hashicorp/consul/issues/14854)] - xds: Convert service mesh failover to use Envoy's aggregate clusters. This changes the names of some [Envoy dynamic HTTP metrics](https://www.envoyproxy.io/docs/envoy/latest/configuration/upstream/cluster_manager/cluster_stats#dynamic-http-statistics). \[[GH-14178](https://togithub.com/hashicorp/consul/issues/14178)] SECURITY: - Ensure that data imported from peers is filtered by ACLs at the UI Nodes/Services endpoints [CVE-2022-3920](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-3920) \[[GH-15356](https://togithub.com/hashicorp/consul/issues/15356)] FEATURES: - DNS-proxy support via gRPC request. \[[GH-14811](https://togithub.com/hashicorp/consul/issues/14811)] - cli: Add -node-name flag to redirect-traffic command to support running in environments without client agents. \[[GH-14933](https://togithub.com/hashicorp/consul/issues/14933)] - cli: Add `-consul-dns-port` flag to the `consul connect redirect-traffic` command to allow forwarding DNS traffic to a specific Consul DNS port. \[[GH-15050](https://togithub.com/hashicorp/consul/issues/15050)] - connect: Add Envoy connection balancing configuration fields. \[[GH-14616](https://togithub.com/hashicorp/consul/issues/14616)] - grpc: Added metrics for external gRPC server. Added `server_type=internal|external` label to gRPC metrics. \[[GH-14922](https://togithub.com/hashicorp/consul/issues/14922)] - http: Add new `get-or-empty` operation to the txn api. Refer to the [API docs](https://www.consul.io/api-docs/txn#kv-operations) for more information. \[[GH-14474](https://togithub.com/hashicorp/consul/issues/14474)] - peering: Add mesh gateway local mode support for cluster peering. \[[GH-14817](https://togithub.com/hashicorp/consul/issues/14817)] - peering: Add support for stale queries for trust bundle lookups \[[GH-14724](https://togithub.com/hashicorp/consul/issues/14724)] - peering: Add support to failover to services running on cluster peers. \[[GH-14396](https://togithub.com/hashicorp/consul/issues/14396)] - peering: Add support to redirect to services running on cluster peers with service resolvers. \[[GH-14445](https://togithub.com/hashicorp/consul/issues/14445)] - peering: Ensure un-exported services get deleted even if the un-export happens while cluster peering replication is down. \[[GH-14797](https://togithub.com/hashicorp/consul/issues/14797)] - peering: add support for routine peering control-plane traffic through mesh gateways \[[GH-14981](https://togithub.com/hashicorp/consul/issues/14981)] - sdk: Configure `iptables` to forward DNS traffic to a specific DNS port. \[[GH-15050](https://togithub.com/hashicorp/consul/issues/15050)] - telemetry: emit memberlist size metrics and broadcast queue depth metric. 
\[[GH-14873](https://togithub.com/hashicorp/consul/issues/14873)] - ui: Added support for central config merging \[[GH-14604](https://togithub.com/hashicorp/consul/issues/14604)] - ui: Create peerings detail page \[[GH-14947](https://togithub.com/hashicorp/consul/issues/14947)] - ui: Detect a TokenSecretID cookie and passthrough to localStorage \[[GH-14495](https://togithub.com/hashicorp/consul/issues/14495)] - ui: Display notice banner on nodes index page if synthetic nodes are being filtered. \[[GH-14971](https://togithub.com/hashicorp/consul/issues/14971)] - ui: Filter agentless (synthetic) nodes from the nodes list page. \[[GH-14970](https://togithub.com/hashicorp/consul/issues/14970)] - ui: Filter out node health checks on agentless service instances \[[GH-14986](https://togithub.com/hashicorp/consul/issues/14986)] - ui: Remove node meta on service instances when using agentless and consolidate external-source labels on service instances page if they all match. \[[GH-14921](https://togithub.com/hashicorp/consul/issues/14921)] - ui: Removed reference to node name on service instance page when using agentless \[[GH-14903](https://togithub.com/hashicorp/consul/issues/14903)] - ui: Use withCredentials for all HTTP API requests \[[GH-14343](https://togithub.com/hashicorp/consul/issues/14343)] - xds: servers will limit the number of concurrent xDS streams they can handle to balance the load across all servers \[[GH-14397](https://togithub.com/hashicorp/consul/issues/14397)] IMPROVEMENTS: - peering: Add peering datacenter and partition to initial handshake. \[[GH-14889](https://togithub.com/hashicorp/consul/issues/14889)] - xds: Added a rate limiter to the delivery of proxy config updates, to prevent updates to "global" resources such as wildcard intentions from overwhelming servers (see: `xds.update_max_per_second` config field) \[[GH-14960](https://togithub.com/hashicorp/consul/issues/14960)] - xds: Removed a bottleneck in Envoy config generation, enabling a higher number of dataplanes per server \[[GH-14934](https://togithub.com/hashicorp/consul/issues/14934)] - agent/hcp: add initial HashiCorp Cloud Platform integration \[[GH-14723](https://togithub.com/hashicorp/consul/issues/14723)] - agent: Added configuration option cloud.scada_address. \[[GH-14936](https://togithub.com/hashicorp/consul/issues/14936)] - api: Add filtering support to Catalog's List Services (v1/catalog/services) \[[GH-11742](https://togithub.com/hashicorp/consul/issues/11742)] - api: Increase max number of operations inside a transaction for requests to /v1/txn (128) \[[GH-14599](https://togithub.com/hashicorp/consul/issues/14599)] - auto-config: Relax the validation on auto-config JWT authorization to allow non-whitespace, non-quote characters in node names. \[[GH-15370](https://togithub.com/hashicorp/consul/issues/15370)] - config-entry: Validate that service-resolver `Failover`s and `Redirect`s only specify `Partition` and `Namespace` on Consul Enterprise. This prevents scenarios where OSS Consul would save service-resolvers that require Consul Enterprise. 
\[[GH-14162](https://togithub.com/hashicorp/consul/issues/14162)] - connect: Add Envoy 1.24.0 to support matrix \[[GH-15093](https://togithub.com/hashicorp/consul/issues/15093)] - connect: Bump Envoy 1.20 to 1.20.7, 1.21 to 1.21.5 and 1.22 to 1.22.5 \[[GH-14831](https://togithub.com/hashicorp/consul/issues/14831)] - connect: service-router destinations have gained a `RetryOn` field for specifying the conditions when Envoy should retry requests beyond specific status codes and generic connection failure which already exists. \[[GH-12890](https://togithub.com/hashicorp/consul/issues/12890)] - dns/peering: **(Enterprise Only)** Support addresses in the formats `.virtual..ns..ap..peer.consul` and `.virtual..ap..peer.consul`. This longer form address that allows specifying `.peer` would need to be used for tproxy DNS requests made within non-default partitions for imported services. - dns: **(Enterprise Only)** All enterprise locality labels are now optional in DNS lookups. For example, service lookups support the following format: `[.].service[..ns][..ap][..dc]`. \[[GH-14679](https://togithub.com/hashicorp/consul/issues/14679)] - integ test: fix flakiness due to test condition from retry app endoint \[[GH-15233](https://togithub.com/hashicorp/consul/issues/15233)] - metrics: Service RPC calls less than 1ms are now emitted as a decimal number. \[[GH-12905](https://togithub.com/hashicorp/consul/issues/12905)] - peering: adds an internally managed server certificate for automatic TLS between servers in peer clusters. \[[GH-14556](https://togithub.com/hashicorp/consul/issues/14556)] - peering: require TLS for peering connections using server cert signed by Connect CA \[[GH-14796](https://togithub.com/hashicorp/consul/issues/14796)] - peering: return information about the health of the peering when the leader is queried to read a peering. \[[GH-14747](https://togithub.com/hashicorp/consul/issues/14747)] - raft: Allow nonVoter to initiate an election to avoid having an election infinite loop when a Voter is converted to NonVoter \[[GH-14897](https://togithub.com/hashicorp/consul/issues/14897)] - raft: Cap maximum grpc wait time when heartbeating to heartbeatTimeout/2 \[[GH-14897](https://togithub.com/hashicorp/consul/issues/14897)] - raft: Fix a race condition where the snapshot file is closed without being opened \[[GH-14897](https://togithub.com/hashicorp/consul/issues/14897)] - telemetry: Added a `consul.xds.server.streamStart` metric to measure time taken to first generate xDS resources for an xDS stream. \[[GH-14957](https://togithub.com/hashicorp/consul/issues/14957)] - ui: Improve guidance around topology visualisation \[[GH-14527](https://togithub.com/hashicorp/consul/issues/14527)] - xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services. \[[GH-14373](https://togithub.com/hashicorp/consul/issues/14373)] BUG FIXES: - checks: Do not set interval as timeout value \[[GH-14619](https://togithub.com/hashicorp/consul/issues/14619)] - checks: If set, use proxy address for automatically added sidecar check instead of service address. \[[GH-14433](https://togithub.com/hashicorp/consul/issues/14433)] - cli: Fix Consul kv CLI 'GET' flags 'keys' and 'recurse' to be set together \[[GH-13493](https://togithub.com/hashicorp/consul/issues/13493)] - connect: Fix issue where mesh-gateway settings were not properly inherited from configuration entries. 
\[[GH-15186](https://togithub.com/hashicorp/consul/issues/15186)] - connect: fixed bug where endpoint updates for new xDS clusters could block for 15s before being sent to Envoy. \[[GH-15083](https://togithub.com/hashicorp/consul/issues/15083)] - connect: strip port from DNS SANs for ingress gateway leaf certificate to avoid an invalid hostname error when using the Vault provider. \[[GH-15320](https://togithub.com/hashicorp/consul/issues/15320)] - debug: fixed bug that caused consul debug CLI to error on ACL-disabled clusters \[[GH-15155](https://togithub.com/hashicorp/consul/issues/15155)] - deps: update go-memdb, fixing goroutine leak \[[GH-15010](https://togithub.com/hashicorp/consul/issues/15010)] \[[GH-15068](https://togithub.com/hashicorp/consul/issues/15068)] - grpc: Merge proxy-defaults and service-defaults in GetEnvoyBootstrapParams response. \[[GH-14869](https://togithub.com/hashicorp/consul/issues/14869)] - metrics: Add duplicate metrics that have only a single "consul\_" prefix for all existing metrics with double ("consul_consul\_") prefix, with the intent to standardize on single prefixes. \[[GH-14475](https://togithub.com/hashicorp/consul/issues/14475)] - namespace: **(Enterprise Only)** Fixed a bug where a client may incorrectly log that namespaces were not enabled in the local datacenter - peering: Fix a bug that resulted in /v1/agent/metrics returning an error. \[[GH-15178](https://togithub.com/hashicorp/consul/issues/15178)] - peering: fix nil pointer in calling handleUpdateService \[[GH-15160](https://togithub.com/hashicorp/consul/issues/15160)] - peering: fix the error of wan address isn't taken by the peering token. \[[GH-15065](https://togithub.com/hashicorp/consul/issues/15065)] - peering: when wan address is set, peering stream should use the wan address. \[[GH-15108](https://togithub.com/hashicorp/consul/issues/15108)] - proxycfg(mesh-gateway): Fix issue where deregistered services are not removed from mesh-gateway clusters. \[[GH-15272](https://togithub.com/hashicorp/consul/issues/15272)] - server: fix goroutine/memory leaks in the xDS subsystem (these were present regardless of whether or not xDS was in-use) \[[GH-14916](https://togithub.com/hashicorp/consul/issues/14916)] - server: fixes the error trying to source proxy configuration for http checks, in case of proxies using consul-dataplane. \[[GH-14924](https://togithub.com/hashicorp/consul/issues/14924)] - xds: Central service configuration (proxy-defaults and service-defaults) is now correctly applied to Consul Dataplane proxies \[[GH-14962](https://togithub.com/hashicorp/consul/issues/14962)] NOTES: - deps: Upgrade to use Go 1.19.2 \[[GH-15090](https://togithub.com/hashicorp/consul/issues/15090)] ### [`v1.13.9`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.9) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.8...v1.13.9) #### 1.13.9 (June 26, 2023) BREAKING CHANGES: - connect: Disable peering by default in connect proxies for Consul 1.13. This change was made to prevent inefficient polling queries from having a negative impact on server performance. Peering in Consul 1.13 is an experimental feature and is not recommended for use in production environments. If you still wish to use the experimental peering feature, ensure [`peering.enabled = true`](https://developer.hashicorp.com/consul/docs/v1.13.x/agent/config/config-files#peering_enabled) is set on all clients and servers. 
\[[GH-17731](https://togithub.com/hashicorp/consul/issues/17731)] SECURITY: - Update to UBI base image to 9.2. \[[GH-17513](https://togithub.com/hashicorp/consul/issues/17513)] FEATURES: - server: **(Enterprise Only)** allow automatic license utilization reporting. \[[GH-5102](https://togithub.com/hashicorp/consul/issues/5102)] IMPROVEMENTS: - debug: change default setting of consul debug command. now default duration is 5ms and default log level is 'TRACE' \[[GH-17596](https://togithub.com/hashicorp/consul/issues/17596)] - systemd: set service type to notify. \[[GH-16845](https://togithub.com/hashicorp/consul/issues/16845)] BUG FIXES: - cache: fix a few minor goroutine leaks in leaf certs and the agent cache \[[GH-17636](https://togithub.com/hashicorp/consul/issues/17636)] - namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. - namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace. This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp. - peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. \[[GH-17483](https://togithub.com/hashicorp/consul/issues/17483)] ### [`v1.13.8`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.8) [Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.7...v1.13.8) #### 1.13.8 (May 16, 2023) SECURITY: - Upgrade to use Go 1.20.1. This resolves vulnerabilities [CVE-2022-41724](https://go.dev/issue/58001) in `crypto/tls` and [CVE-2022-41723](https://go.dev/issue/57855) in `net/http`. \[[GH-16263](https://togithub.com/hashicorp/consul/issues/16263)] - Upgrade to use Go 1.20.4. This resolves vulnerabilities [CVE-2023-24537](https://togithub.com/advisories/GHSA-9f7g-gqwh-jpf5)(`go/scanner`), [CVE-2023-24538](https://togithub.com/advisories/GHSA-v4m2-x4rp-hv22)(`html/template`), [CVE-2023-24534](https://togithub.com/advisories/GHSA-8v5j-pwr7-w5f8)(`net/textproto`) and [CVE-2023-24536](https://togithub.com/advisories/GHSA-9f7g-gqwh-jpf5)(`mime/multipart`). Also, `golang.org/x/net` has been updated to v0.7.0 to resolve CVEs [CVE-2022-41721](https://togithub.com/advisories/GHSA-fxg5-wq6x-vr4w), [CVE-2022-27664](https://togithub.com/advisories/GHSA-69cg-p879-7622) and [CVE-2022-41723](https://togithub.com/advisories/GHSA-vvpx-j8f3-3w6h.) \[[GH-17240](https://togithub.com/hashicorp/consul/issues/17240)] IMPROVEMENTS: - api: updated the go module directive to 1.18. \[[GH-15297](https://togithub.com/hashicorp/consul/issues/15297)] - connect: update supported envoy versions to 1.20.7, 1.21.6, 1.22.11, 1.23.8 \[[GH-16891](https://togithub.com/hashicorp/consul/issues/16891)] - sdk: updated the go module directive to 1.18. \[[GH-15297](https://togithub.com/hashicorp/consul/issues/15297)] BUG FIXES: - Fix an bug where decoding some Config structs with unset pointer fields could fail with `reflect: call of reflect.Value.Type on zero Value`. \[[GH-17048](https://togithub.com/hashicorp/consul/issues/17048)] - audit-logging: (Enterprise only) Fix a bug where `/agent/monitor` and `/agent/metrics` endpoints return a `Streaming not supported` error when audit logs are enabled. This also fixes the delay receiving logs when running `consul monitor` against an agent with audit logs enabled. 
\[[GH-16700](https://togithub.com/hashicorp/consul/issues/16700)]
- ca: Fixes a bug where updating the Vault CA Provider config would cause TLS issues in the service mesh \[[GH-16592](https://togithub.com/hashicorp/consul/issues/16592)]
- connect: Fix multiple inefficient behaviors when querying service health. \[[GH-17241](https://togithub.com/hashicorp/consul/issues/17241)]
- grpc: ensure the grpc resolver correctly uses lan/wan addresses on servers \[[GH-17270](https://togithub.com/hashicorp/consul/issues/17270)]
- peering: Fixes a bug that can lead to peering service deletes impacting the state of local services \[[GH-16570](https://togithub.com/hashicorp/consul/issues/16570)]
- xds: Fix a possible panic that can occur when generating clusters before the root certificates have been fetched. \[[GH-17185](https://togithub.com/hashicorp/consul/issues/17185)]

### [`v1.13.7`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.7)

[Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.6...v1.13.7)

#### 1.13.7 (March 7, 2023)

SECURITY:

- Upgrade to use Go 1.19.6. This resolves vulnerabilities [CVE-2022-41724](https://go.dev/issue/58001) in `crypto/tls` and [CVE-2022-41723](https://go.dev/issue/57855) in `net/http`. \[[GH-16299](https://togithub.com/hashicorp/consul/issues/16299)]

IMPROVEMENTS:

- xds: Removed a bottleneck in Envoy config generation. \[[GH-16269](https://togithub.com/hashicorp/consul/issues/16269)]
- container: Upgrade container image to use Alpine 3.17. \[[GH-16358](https://togithub.com/hashicorp/consul/issues/16358)]
- mesh: Add ServiceResolver RequestTimeout for route timeouts to make request timeouts configurable \[[GH-16495](https://togithub.com/hashicorp/consul/issues/16495)]

BUG FIXES:

- mesh: Fix resolution of service resolvers with subsets for external upstreams \[[GH-16499](https://togithub.com/hashicorp/consul/issues/16499)]
- proxycfg: fix a bug where terminating gateways were not cleaning up deleted service resolvers for their referenced services \[[GH-16498](https://togithub.com/hashicorp/consul/issues/16498)]

### [`v1.13.6`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.6)

[Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.5...v1.13.6)

#### 1.13.6 (January 26, 2023)

FEATURES:

- connect: add flags `envoy-ready-bind-port` and `envoy-ready-bind-address` to the `consul connect envoy` command that allow configuration of a readiness probe on the proxy for any service kind. \[[GH-16015](https://togithub.com/hashicorp/consul/issues/16015)]
- deps: update to latest go-discover to provide ECS auto-discover capabilities. \[[GH-13782](https://togithub.com/hashicorp/consul/issues/13782)]

IMPROVEMENTS:

- grpc: Use new balancer implementation to reduce periodic WARN logs when shuffling servers. \[[GH-15701](https://togithub.com/hashicorp/consul/issues/15701)]
- partition: **(Consul Enterprise only)** when loading a service from an on-disk config file or sending an API request to an agent endpoint, if the partition is unspecified, consul will default the partition in the request to the agent's partition \[[GH-16024](https://togithub.com/hashicorp/consul/issues/16024)]

BUG FIXES:

- agent: Fix assignment of error when auto-reloading cert and key file changes. \[[GH-15769](https://togithub.com/hashicorp/consul/issues/15769)]

### [`v1.13.5`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.5)

[Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.4...v1.13.5)

#### 1.13.5 (December 13, 2022)

SECURITY:

- Upgrade to use Go 1.18.9.
This resolves a vulnerability where restricted files can be read on Windows. [CVE-2022-41720](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41720) \[[GH-15706](https://togithub.com/hashicorp/consul/issues/15706)]
- Upgrades `golang.org/x/net` to prevent a denial of service caused by excessive memory usage from HTTP/2 requests. [CVE-2022-41717](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-41717) \[[GH-15743](https://togithub.com/hashicorp/consul/issues/15743)]

IMPROVEMENTS:

- connect: ensure all vault connect CA tests use limited-privilege tokens \[[GH-15669](https://togithub.com/hashicorp/consul/issues/15669)]

BUG FIXES:

- agent: **(Enterprise Only)** Ensure configIntentionsConvertToList does not compare empty strings with populated strings when filtering intentions created prior to AdminPartitions.
- cli: **(Enterprise Only)** Fix an issue where the `consul partition update` subcommand was not registered and therefore not available through the cli.
- connect: Fixed an issue where using Vault 1.11+ as the CA provider in a secondary datacenter would eventually break Intermediate CAs \[[GH-15661](https://togithub.com/hashicorp/consul/issues/15661)]

### [`v1.13.4`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.4)

[Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.3...v1.13.4)

#### 1.13.4 (November 30, 2022)

IMPROVEMENTS:

- auto-config: Relax the validation on auto-config JWT authorization to allow non-whitespace, non-quote characters in node names. \[[GH-15370](https://togithub.com/hashicorp/consul/issues/15370)]
- raft: Allow a NonVoter to initiate an election to avoid an infinite election loop when a Voter is converted to a NonVoter \[[GH-14897](https://togithub.com/hashicorp/consul/issues/14897)]
- raft: Cap the maximum grpc wait time when heartbeating to heartbeatTimeout/2 \[[GH-14897](https://togithub.com/hashicorp/consul/issues/14897)]
- raft: Fix a race condition where the snapshot file is closed without being opened \[[GH-14897](https://togithub.com/hashicorp/consul/issues/14897)]

BUG FIXES:

- agent: Fixed an issue where blocking queries with short waits could time out on the client \[[GH-15541](https://togithub.com/hashicorp/consul/issues/15541)]
- ca: Fixed an issue where using Vault as the Connect CA with Vault-managed policies would error on start-up if the intermediate PKI mount existed but was empty \[[GH-15525](https://togithub.com/hashicorp/consul/issues/15525)]
- connect: Fixed an issue where using Vault 1.11+ as the CA provider would eventually break Intermediate CAs \[[GH-15217](https://togithub.com/hashicorp/consul/issues/15217)] \[[GH-15253](https://togithub.com/hashicorp/consul/issues/15253)]
- connect: fixed a bug where endpoint updates for new xDS clusters could block for 15s before being sent to Envoy. \[[GH-15083](https://togithub.com/hashicorp/consul/issues/15083)]
- connect: strip port from DNS SANs for the ingress gateway leaf certificate to avoid an invalid hostname error when using the Vault provider.
\[[GH-15320](https://togithub.com/hashicorp/consul/issues/15320)]
- debug: fixed a bug that caused the consul debug CLI to error on ACL-disabled clusters \[[GH-15155](https://togithub.com/hashicorp/consul/issues/15155)]
- deps: update go-memdb, fixing a goroutine leak \[[GH-15010](https://togithub.com/hashicorp/consul/issues/15010)] \[[GH-15068](https://togithub.com/hashicorp/consul/issues/15068)]
- namespace: **(Enterprise Only)** Fix a bug that caused blocking queries during namespace replication to time out
- namespace: **(Enterprise Only)** Fixed a bug where a client may incorrectly log that namespaces were not enabled in the local datacenter
- peering: better represent non-passing states during peer check flattening \[[GH-15615](https://togithub.com/hashicorp/consul/issues/15615)]
- peering: fix an error where the WAN address isn't taken from the peering token. \[[GH-15065](https://togithub.com/hashicorp/consul/issues/15065)]
- peering: when a WAN address is set, the peering stream now uses the WAN address. \[[GH-15108](https://togithub.com/hashicorp/consul/issues/15108)]

### [`v1.13.3`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.3)

[Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.2...v1.13.3)

#### 1.13.3 (October 19, 2022)

FEATURES:

- agent: Added a new config option `rpc_client_timeout` to tune timeouts for client RPC requests (a configuration sketch follows this release's notes) \[[GH-14965](https://togithub.com/hashicorp/consul/issues/14965)]
- config-entry(ingress-gateway): Added support for `max_connections` for upstream clusters \[[GH-14749](https://togithub.com/hashicorp/consul/issues/14749)]

IMPROVEMENTS:

- connect/ca: Log a warning message instead of erroring when attempting to update the intermediate pki mount when using the Vault provider. \[[GH-15035](https://togithub.com/hashicorp/consul/issues/15035)]
- connect: Added gateway options to the Envoy proxy config for enabling tcp keepalives on terminating gateway upstreams and mesh gateways in remote datacenters. \[[GH-14800](https://togithub.com/hashicorp/consul/issues/14800)]
- connect: Bump Envoy 1.20 to 1.20.7, 1.21 to 1.21.5 and 1.22 to 1.22.5 \[[GH-14828](https://togithub.com/hashicorp/consul/issues/14828)]
- licensing: **(Enterprise Only)** Consul Enterprise production licenses do not degrade or terminate Consul upon expiration. They will only fail when trying to upgrade to a newer version of Consul. Evaluation licenses still terminate. \[[GH-1990](https://togithub.com/hashicorp/consul/issues/1990)]

BUG FIXES:

- agent: avoid leaking the alias check runner goroutine when the check is de-registered \[[GH-14935](https://togithub.com/hashicorp/consul/issues/14935)]
- ca: fix a masked bug in leaf cert generation that would not be notified of root cert rotation after the first one \[[GH-15005](https://togithub.com/hashicorp/consul/issues/15005)]
- cache: prevent a goroutine leak in the agent cache \[[GH-14908](https://togithub.com/hashicorp/consul/issues/14908)]
- checks: Fixed a bug that prevented registration of UDP health checks from agent configuration files, such as service definition files with embedded health check definitions. \[[GH-14885](https://togithub.com/hashicorp/consul/issues/14885)]
- connect: Fixed a bug where transparent proxy does not correctly spawn listeners for upstreams to service-resolvers. \[[GH-14751](https://togithub.com/hashicorp/consul/issues/14751)]
- snapshot-agent: **(Enterprise only)** Fix a bug when a session is not found in Consul, which leads the agent to panic.
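
For context on the new `rpc_client_timeout` option above, a minimal sketch of where it lives in agent configuration; the placement under the `limits` stanza follows the Consul agent configuration reference, and the value shown is only an illustrative choice, not a recommendation:

```hcl
# Hypothetical agent configuration fragment.
limits {
  # How long a client agent waits on a server RPC before giving up;
  # raise this if long-running blocking queries are being cut off.
  rpc_client_timeout = "60s"
}
```
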
### [`v1.13.2`](https://togithub.com/hashicorp/consul/releases/tag/v1.13.2)

[Compare Source](https://togithub.com/hashicorp/consul/compare/v1.13.1...v1.13.2)

#### 1.13.2 (September 20, 2022)

SECURITY:

- auto-config: Added input validation for auto-config JWT authorization checks. Prior to this change, it was possible for malicious actors to construct requests which incorrectly pass custom JWT claim validation for the `AutoConfig.InitialConfiguration` endpoint. Now, only a subset of characters are allowed for the input before evaluating the bexpr. \[[GH-14577](https://togithub.com/hashicorp/consul/issues/14577)]
- connect: Added URI length checks to ConnectCA CSR requests. Prior to this change, it was possible for a malicious actor to designate multiple SAN URI values in a call to the `ConnectCA.Sign` endpoint. The endpoint now only allows for exactly one SAN URI to be specified. \[[GH-14579](https://togithub.com/hashicorp/consul/issues/14579)]

FEATURES:

- cli: Adds new subcommands for `peering` workflows. Refer to the [CLI docs](https://www.consul.io/commands/peering) for more information. \[[GH-14423](https://togithub.com/hashicorp/consul/issues/14423)]
- connect: Server address changes are streamed to peers \[[GH-14285](https://togithub.com/hashicorp/consul/issues/14285)]
- service-defaults: Added support for `local_request_timeout_ms` and `local_connect_timeout_ms` in the service-defaults config entry (see the sketch after these notes) \[[GH-14395](https://togithub.com/hashicorp/consul/issues/14395)]

IMPROVEMENTS:

- connect: Bump latest Envoy to 1.23.1 in test matrix \[[GH-14573](https://togithub.com/hashicorp/consul/issues/14573)]
- connect: expose new tracing configuration on envoy \[[GH-13998](https://togithub.com/hashicorp/consul/issues/13998)]
- envoy: adds additional Envoy outlier ejection parameters to passive health check configurations. \[[GH-14238](https://togithub.com/hashicorp/consul/issues/14238)]
- metrics: add labels of segment, partition, network area, network (lan or wan) to serf and memberlist metrics \[[GH-14161](https://togithub.com/hashicorp/consul/issues/14161)]
- peering: Validate peering tokens for server name conflicts \[[GH-14563](https://togithub.com/hashicorp/consul/issues/14563)]
- snapshot agent: **(Enterprise only)** Add support for path-based addressing when using the s3 backend.
- ui: Reuse connections for requests to /v1/internal/ui/metrics-proxy/ \[[GH-14521](https://togithub.com/hashicorp/consul/issues/14521)]

BUG FIXES:

- agent: Fixes an issue where an agent that fails to start due to bad addresses won't clean up any existing listeners
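
And for the `service-defaults` timeouts introduced in 1.13.2 above, a sketch of a config entry that uses them; the service name `web` and the millisecond values are illustrative assumptions, and the snake_case keys follow the changelog's spelling (Consul also accepts CamelCase keys in config entries):

```hcl
# Hypothetical service-defaults config entry, written to a file and
# applied with `consul config write`.
Kind = "service-defaults"
Name = "web"

# Request and connect timeouts enforced by the local Envoy proxy,
# in milliseconds; values here are placeholders.
local_request_timeout_ms = 15000
local_connect_timeout_ms = 5000
```
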
---

### Configuration

📅 **Schedule**: Branch creation - "" (UTC), Automerge - At any time (no schedule defined).

🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.

♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.

🔕 **Ignore**: Close this PR and you won't be reminded about this update again.

---

- [ ] If you want to rebase/retry this PR, check this box

---

This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/grafana/loki).

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
 go.mod             |   2 +-
 go.sum             | 803 ++++++++++++++++++++++++++++++++++++++++-----
 vendor/modules.txt |   2 +-
 3 files changed, 720 insertions(+), 87 deletions(-)

diff --git a/go.mod b/go.mod
index f64f68f05f355..0a3b7f5343afd 100644
--- a/go.mod
+++ b/go.mod
@@ -335,7 +335,7 @@ replace github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v
 
 replace github.com/Azure/azure-storage-blob-go => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e
 
-replace github.com/hashicorp/consul => github.com/hashicorp/consul v1.5.1
+replace github.com/hashicorp/consul => github.com/hashicorp/consul v1.14.5
 
 // Use fork of gocql that has gokit logs and Prometheus metrics.
 replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
diff --git a/go.sum b/go.sum
index ce9b82ad1e2ac..88ec80bbecaa6 100644
--- a/go.sum
+++ b/go.sum
@@ -2,6 +2,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
@@ -18,43 +19,124 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
 cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
 cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk=
 cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
 cloud.google.com/go/bigtable v1.18.1 h1:SxQk9Bj6OKxeiuvevG/KBjqGn/7X8heZbWfK0tYkFd8=
 cloud.google.com/go/bigtable v1.18.1/go.mod h1:NAVyfJot9jlo+KmgWLUJ5DJGwNDoChzAcrecLpmuAmY=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
 cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y=
 cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
 cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
 cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
 cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
 cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI=
 cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
 cloud.google.com/go/pubsub v1.32.0 h1:JOEkgEYBuUTHSyHS4TcqOFuWr+vD6qO/imsFqShUCp4=
 cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
 cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
 cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
 code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
 collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
+contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
@@ -79,27 +161,39 @@ github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v10.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v10.15.3+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
+github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
 github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
 github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
 github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
+github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E=
+github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
 github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
 github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
 github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
 github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
+github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.1/go.mod h1:5TgH20II424SXIV9YDBsO4rBCKsh39Vbx9DvhJZZ8rU=
 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U=
 github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -109,18 +203,24 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY
 github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
 github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
 github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
 github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
 github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
 github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
 github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
 github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
 github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
@@ -128,10 +228,11 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkM
 github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o=
 github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk=
+github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
 github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
 github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
@@ -139,7 +240,6 @@ github.com/IBM/go-sdk-core/v5 v5.13.1 h1:zD6p3t1whAlRJo/VBmE69c8RcH9LCHL1n0/sO1M
 github.com/IBM/go-sdk-core/v5 v5.13.1/go.mod h1:pVkN7IGmsSdmR1ZCU4E/cLcCclqRKMYgg7ya+O2Mk6g=
 github.com/IBM/ibm-cos-sdk-go v1.10.0 h1:/2VIev2/jBei39OqU2+nSZQnoWJ+KtkiSAIDkqsd7uU=
 github.com/IBM/ibm-cos-sdk-go v1.10.0/go.mod h1:C8KRTRaoD3CWPPBOa6FCOpdh0ZMlUjKAAA4i3F+Q/sc=
-github.com/Jeffail/gabs v1.1.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e h1:HisBR+gQKIwJqDe1iNVqUDk+GTRE2IZAbl+fLoDKNBs=
 github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
@@ -159,16 +259,16 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
 github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
-github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA=
+github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/SAP/go-hdb v0.12.0/go.mod h1:etBT+FAi1t5k3K3tf5vQTnosgYmhDkRi8jEnQqCnxF0=
-github.com/SermoDigital/jose v0.0.0-20180104203859-803625baeddc/go.mod h1:ARgCUhI1MHQH+ONky/PAtmVHQrP5JlGY0F3poXOp/fA=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ=
 github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
 github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A=
 github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
@@ -182,7 +282,10 @@ github.com/Workiva/go-datastructures v1.1.0/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5
 github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
 github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/akamai/AkamaiOPEN-edgegrid-golang v0.9.0/go.mod h1:zpDJeKyp9ScW4NNrbdr+Eyxvry3ilGPewKoXw3XGN1k=
+github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -195,31 +298,47 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZp
 github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
 github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo=
 github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190808125512-07798873deee/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible h1:KpbJFXwhVeuxNtBJ74MCGbIoaBok2uZvkD7QXp2+Wis=
 github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA=
 github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
+github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.42.34/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
 github.com/aws/aws-sdk-go v1.44.321 h1:iXwFLxWjZPjYqjPq0EcCs46xX7oDLEELte1+BzgpKk8=
 github.com/aws/aws-sdk-go v1.44.321/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
@@ -249,6 +368,7 @@ github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc h1:Keo7wQ7UODU
 github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
 github.com/baidubce/bce-sdk-go v0.9.141 h1:EV5BH5lfymIGPSmYDo9xYdsVlvWAW6nFeiA6t929zBE=
 github.com/baidubce/bce-sdk-go v0.9.141/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
@@ -269,38 +389,43 @@ github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMU
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I=
 github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
+github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM=
 github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc=
 github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
 github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -308,23 +433,32 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
 github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0=
+github.com/coredns/coredns v1.6.6/go.mod h1:Bdcnka9HmKGYj12ZIDF3lpQSfDHSsMc85Wj9xEyZUts=
+github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
 github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
 github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
+github.com/cpu/goacmedns v0.0.1/go.mod h1:sesf/pNnCYwUevQEQfEwY0Y3DydlQWSGZbaMElOWxok=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -339,26 +473,32 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denisenkom/go-mssqldb v0.0.0-20180620032804-94c9c97e8c9f/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc=
+github.com/decker502/dnspod-go v0.2.0/go.mod h1:qsurYu1FgxcDwfSwXJdLt4kRsBLZeosEb9uq4Sy+08g=
 github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
 github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
 github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
 github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/digitalocean/godo v1.1.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU=
 github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU=
 github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E=
 github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnsimple/dnsimple-go v0.30.0/go.mod h1:O5TJ0/U6r7AfT8niYNlmohpLbCSG+c71tQlGr9SeGrg=
+github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U=
 github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -373,13 +513,13 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz
 github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 h1:9Hsno4vmXpQ0yVAp07bLxS5dHH24w80xzmUCLil47ME=
 github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g=
 github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g=
-github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
@@ -400,12 +540,11 @@ github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E
 github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I=
 github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps=
 github.com/efficientgo/e2e v0.13.1-0.20220922081603-45de9fc588a8 h1:UFLc39BcUXahSNCLUrKjNGZABMUZaS4M74EZvTRnq3k=
-github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.0.0-20180919002855-2137d9196328/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -413,29 +552,36 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod 
h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/fatih/structs v0.0.0-20180123065059-ebf56d35bba7/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+ptC4kTFPEN9Ej2latTq3bZJ5HO/OwPXYMs= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -443,7 +589,9 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8= @@ -452,10 +600,17 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-acme/lego/v3 v3.1.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= +github.com/go-acme/lego/v3 v3.2.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW4s= +github.com/go-errors/errors v1.0.1/go.mod 
h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ini/ini v1.44.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -464,7 +619,7 @@ github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEai github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -478,47 +633,127 @@ github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= +github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors 
v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= 
+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= +github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/runtime v0.24.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= +github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= 
+github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= +github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= +github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= 
+github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= @@ -530,12 +765,13 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= -github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -569,10 +805,12 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf 
v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -582,6 +820,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= @@ -590,12 +829,12 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -603,8 +842,11 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= @@ -619,13 +861,16 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= @@ -643,10 +888,11 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -662,6 +908,7 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -673,6 +920,11 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= @@ -681,26 +933,38 @@ github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qA github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2/go.mod h1:DavVbd41y+b7ukKDmlnPR4nGYmkWXR6vHUkjQNiHPBs= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= github.com/googleapis/enterprise-certificate-proxy 
v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= -github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -711,9 +975,9 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket 
v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 h1:wRtcM7fvzg/MJ4KCIYLryadp2fI3pO61BEiY7SizCoI= @@ -730,57 +994,74 @@ github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 h1:bjh0PVYSVVFxzINqPFYJmAmJNrWPgnVjuSdYJGHmtFU= github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0/go.mod h1:7t5XR+2IA8P2qggOAHTj/GCZfoLBle3OvNSYh1VkRBU= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= -github.com/hashicorp/consul v1.5.1 h1:p7tRmQ4m3ZMYkGQkuyjLXKbdU1weeumgZFqZOvw7o4c= -github.com/hashicorp/consul v1.5.1/go.mod h1:QsmgXh2YA9Njv6y3/FHXqHYhsMye++3oBoAZ6SR8R8I= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul v1.14.5 h1:Rm3bUUGUGu2+qsQNs1npBdo99Sr5O5YzY6KW498XUV8= +github.com/hashicorp/consul v1.14.5/go.mod 
h1:CIalOkrR9VPK2V+2TaCdZTNBNG1m7UVOnIuc/B9EWyk= +github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706/go.mod h1:1Cs8FlmD1BfSQXJGcFLSV5FuIx1AbJP+EJGdxosoS2g= +github.com/hashicorp/consul-net-rpc v0.0.0-20220307172752-3602954411b4/go.mod h1:vWEAHAeAqfOwB3pSgHMQpIu8VH1jL+Ltg54Tw0wt/NI= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/proto-public v0.2.1/go.mod h1:iWNlBDJIZQJC3bBiCThoqg9i7uk/4RQZYkqH1wiQrss= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-bexpr v0.1.0/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= -github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4= +github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd/go.mod h1:ueUgD9BeIocT7QNuvxSyJyPAM9dfifBcaWmeybb67OY= -github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-connlimit v0.3.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= +github.com/hashicorp/go-discover v0.0.0-20220714221025-1c234a67149a/go.mod h1:1xfdKvc3pe5WKxfUUHHOGaKMk7NLGhHY1jkyhKo6098= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog 
v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.0.0/go.mod h1:JIxYkkFJRDDRSoWQBSh7s9QAVThq+82iWmUpmE4jKak= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ= -github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-raftchunking v0.6.2/go.mod h1:cGlg3JtDy7qy6c/3Bu660Mic1JF+7lWqIwCFSb08fX0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= @@ -796,32 +1077,46 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version 
v0.0.0-20170202080759-03c5bf6be031/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v0.0.0-20180906183839-65a6292f0157/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcp-scada-provider v0.2.0/go.mod h1:Q0WpS2RyhBKOPD4X/8oW7AJe7jA2HXB09EwDzwRTao0= +github.com/hashicorp/hcp-sdk-go v0.23.0/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw= +github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw= +github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= +github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo= github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= -github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= -github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.3.11/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= +github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= 
+github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= +github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= +github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY= github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hashicorp/vault v0.10.3/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= -github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190318174639-195e0e9d07f1/go.mod h1:VJHHT2SC1tAPrfENQeBhLlb5FbZoKZM+oC/ROmEftz0= +github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/heroku/x v0.0.61 h1:yfoAAtnFWSFZj+UlS+RZL/h8QYEp1R4wHVEg0G+Hwh4= github.com/heroku/x v0.0.61/go.mod h1:C7xYbpMdond+s6L5VpniDUSVPRwm3kZum1o7XiD5ZHk= github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= @@ -830,11 +1125,16 @@ github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4 github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.1.3/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df/go.mod h1:QMZY7/J/KSQEhKWFeDesPjMj+wCHReeknARU3wqlyN4= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -849,8 +1149,10 @@ github.com/influxdata/telegraf v1.16.3 h1:x0qeuSGGMg5y+YqP/5ZHwXZu3bcBrO8AAQOTNl github.com/influxdata/telegraf v1.16.3/go.mod h1:fX/6k7qpIqzVPWyeIamb0wN5hbwc0ANUaTS80lPYFB8= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:PcNJqIlcX/dj3DTG/+QQnRvSgTMG6CLpRMjWcv4+J6w= github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= @@ -866,8 +1168,7 @@ github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jefferai/jsonx v0.0.0-20160721235117-9cc31c3135ee/go.mod h1:N0t2vlmpe8nyZB5ouIbJQPDSR+mH6oe7xHB9VZHSUzM= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -880,12 +1181,14 @@ github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible/go.mo github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 
h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -897,7 +1200,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= @@ -908,20 +1210,22 @@ github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaR github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/keybase/go-crypto v0.0.0-20180614160407-5114a9a81e1b/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 
h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -929,36 +1233,52 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labbsr0x/bindman-dns-webhook v1.0.2/go.mod h1:p6b+VCXIR8NYKpDr8/dg1HKfQoRHCdcsROXKvmoehKA= +github.com/labbsr0x/goh v1.0.1/go.mod h1:8K2UhVoaWXcCU7Lxoa2omWnC8gyW8px7/lmO61c027w= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= -github.com/lib/pq v0.0.0-20180523175426-90697d60dd84/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo 
v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= +github.com/linode/linodego v0.10.0/go.mod h1:cziNP7pbvE3mXIPneHj0oRY8L1WtGEIKlZ8LANE4eXA= github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= -github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ= +github.com/lucas-clemente/quic-go v0.13.1/go.mod h1:Vn3/Fb0/77b02SGhQk36KzOUmXgVpFfizUfW5WMaqyU= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marten-seemann/chacha20 v0.2.0/go.mod h1:HSdjFau7GzYRj+ahFNwsO3ouVJr1HFkWoEwNDb4TMtE= +github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/marten-seemann/qtls v0.4.1/go.mod h1:pxVXcHHw1pNIt8Qo0pwSYQEoZ8yYOOPXTCZLQQunvRc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -971,6 +1291,7 @@ github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -978,6 +1299,9 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -986,7 +1310,10 @@ github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mholt/certmagic v0.8.3/go.mod h1:91uJzK5K8IWtYQqTi5R2tsxV1pCde+wdGfaRaOZi6aQ= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= @@ -1000,25 +1327,33 @@ github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -1044,7 +1379,9 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml 
v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -1053,30 +1390,35 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nrdcg/auroradns v1.0.0/go.mod h1:6JPXKzIRzZzMqtTDgueIhTi6rFf1QvYE/HzqidhOhjw= +github.com/nrdcg/goinwx v0.6.1/go.mod h1:XPiut7enlbEdntAqalBIqcYcTEVhpv/dKWgDCX2SwKQ= +github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4rRpRiyw= github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v0.0.0-20180308005104-6934b124db28/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.9.4 
h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= @@ -1088,7 +1430,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1099,28 +1440,34 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/oracle/oci-go-sdk v7.0.0+incompatible h1:oj5ESjXwwkFRdhZSnPlShvLWYdt/IZ65RQxveYM3maA= +github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/oracle/oci-go-sdk/v65 v65.41.1 h1:+lbosOyNiib3TGJDvLq1HwEAuFqkOjPJDIkyxM15WdQ= -github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/maxminddb-golang v1.11.0 h1:aSXMqYR/EPNjGE8epgqwDay+P30hCBZIveY0WZbAWh0= 
github.com/oschwald/maxminddb-golang v1.11.0/go.mod h1:YmVI+H0zh3ySFR3w+oz8PCfglAFj3PuCmui13+P9zDg= +github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014/go.mod h1:joRatxRJaZBsY3JAOEMcoOp05CnZzsx4scTxi95DHyQ= github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1141,14 +1488,17 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= -github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= 
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= @@ -1158,7 +1508,6 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1167,9 +1516,10 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -1183,12 +1533,13 @@ github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdD github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 h1:oHcfzdJnM/SFppy2aUlvomk37GI33x9vgJULihE5Dt8= github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97/go.mod h1:LoBCZeRh+5hX+fSULNyFnagYlQG/gBsyA/deNzROkq8= -github.com/prometheus/procfs 
v0.0.0-20180408092902-8b1c2da0d56d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -1197,6 +1548,9 @@ github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuR github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 h1:6ksZ7t1hNOzGPPs8DK7SvXQf6UfWzi+W5Z7PCBl8gx4= github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510/go.mod h1:UC0TwJiF90m2T3iYPQBKnGu8gv3s55dF/EgpTq8gyvo= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKcyumwBO6qip7RNQ5r77yrssm9bfCowcLEBcU5IA= +github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= @@ -1215,24 +1569,33 @@ github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4 github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod 
h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sacloud/libsacloud v1.26.1/go.mod h1:79ZwATmHLIFZIMd7sxA3LwzVy/B77uj3LDoToVTxDoQ= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= +github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= +github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1249,9 +1612,10 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/skratchdot/open-golang 
v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -1264,23 +1628,31 @@ github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= 
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -1300,34 +1672,45 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= -github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3 h1:avZFY25vRM35FggTBQj2WXq45yEvIKbDLUcNDrJLfKU= github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timewasted/linode v0.0.0-20160829202747-37e84520dcf7/go.mod h1:imsgLplxEC/etjIhdr3dNzV3JeT27LbVu5pYWm0JCBY= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 h1:hbyjqt5UnyKeOT3rFVxLxi7iTI6XqR2p4TkwEAQdUiw= github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:Q5IRRDY+cjIaiOjTAnXN5LKQV5MPqVx5ofQn85Jy5Yw= +github.com/transip/gotransip v0.0.0-20190812104329-6d8d9179b66f/go.mod h1:i0f4R4o2HM0m3DZYQWsj6/MEowD57VzoH0v3d7igeFY= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= 
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/vultr/govultr v0.1.4 h1:UnNMixYFVO0p80itc8PcweoVENyo1PasfvwKhoasR9U= +github.com/vultr/govultr v0.1.4/go.mod h1:9H008Uxr/C4vFNGLqKx232C206GL0PBHzOP0809bGNA= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= @@ -1347,8 +1730,13 @@ github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgk github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 
v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1359,18 +1747,30 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= @@ -1382,6 +1782,7 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= @@ -1404,6 +1805,7 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= @@ -1412,6 +1814,7 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -1420,23 +1823,36 @@ go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= +golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1466,7 +1882,6 @@ golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N0 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1495,21 +1910,26 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1519,6 +1939,7 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1534,6 +1955,7 @@ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1544,23 +1966,37 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod 
h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1568,7 +2004,20 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1583,11 +2032,13 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180622082034-63fc586f45fe/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1596,10 +2047,14 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1607,20 +2062,28 @@ golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1648,40 +2111,66 @@ golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= @@ -1690,6 +2179,7 @@ golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1698,14 +2188,19 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time 
v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1715,6 +2210,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1724,12 +2220,16 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1738,6 +2238,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1775,7 +2276,10 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -1785,6 +2289,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= @@ -1796,7 +2303,6 @@ gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= 
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.0.0-20180829000535-087779f1d2c9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -1818,6 +2324,32 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1831,12 +2363,14 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto 
v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1870,22 +2404,83 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod 
h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM= google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE= google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/grpc v0.0.0-20180920234847-8997b5fa0873/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -1903,10 +2498,24 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1921,8 +2530,10 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod 
h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -1945,8 +2556,12 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1 gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= @@ -1955,12 +2570,15 @@ gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mN gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= -gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= -gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1979,11 +2597,13 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1994,26 +2614,37 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.0.0-20190325185214-7544f9db76f6/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108= k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg= -k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.0.0-20190223001710-c182ff3b9841/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY= k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw= +k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8= k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi 
v0.0.0-20190306001800-15615b16d372/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= @@ -2030,6 +2661,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/modules.txt b/vendor/modules.txt index 90c5028a76f13..3f4f0e17771dc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2201,7 +2201,7 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml # github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible # github.com/Azure/azure-storage-blob-go => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e -# github.com/hashicorp/consul => github.com/hashicorp/consul v1.5.1 +# github.com/hashicorp/consul => github.com/hashicorp/consul v1.14.5 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd From 27411cff087e6fa2c660c6bfa577c31945ab5232 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Fri, 20 Oct 2023 15:27:55 +0200 Subject: [PATCH 06/33] Introduce worker queue in bloom gateway (#10976) Instead of calling the bloom store directly on each and every request to filter chunk refs based on the given filters, we want to queue requests in per-tenant queues and process batches of requests that can be multiplexed to avoid excessive seeking in the bloom block queriers when checking chunk matches. 
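To make the intended flow concrete, here is a minimal, self-contained sketch of per-tenant queuing with round-robin dequeuing and batching. The type names (`task`, `tenantQueues`, `dequeueBatch`) and the batching strategy are illustrative assumptions for this description only, not the real `pkg/queue` API; the actual implementation in the diff below additionally handles shuffle sharding of consumers, graceful consumer shutdown, and hierarchical (actor) queues.

```go
// Illustrative sketch only: toy types, not the real pkg/queue API.
package main

import (
	"fmt"
	"sync"
)

// task stands in for a single chunk-ref filter request of one tenant.
type task struct {
	tenant string
	id     int
}

// tenantQueues keeps one FIFO per tenant so a single busy tenant
// cannot starve the others.
type tenantQueues struct {
	mu      sync.Mutex
	queues  map[string][]task
	tenants []string // tenants in round-robin order
	next    int
}

func newTenantQueues() *tenantQueues {
	return &tenantQueues{queues: map[string][]task{}}
}

func (q *tenantQueues) enqueue(t task) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if _, ok := q.queues[t.tenant]; !ok {
		q.tenants = append(q.tenants, t.tenant)
	}
	q.queues[t.tenant] = append(q.queues[t.tenant], t)
}

// dequeueBatch takes up to n tasks from the next non-empty tenant queue,
// so a worker can multiplex the whole batch against the same bloom blocks
// in a single pass instead of seeking once per request.
func (q *tenantQueues) dequeueBatch(n int) []task {
	q.mu.Lock()
	defer q.mu.Unlock()
	for range q.tenants {
		tenant := q.tenants[q.next%len(q.tenants)]
		q.next++
		if pending := q.queues[tenant]; len(pending) > 0 {
			if n > len(pending) {
				n = len(pending)
			}
			batch := pending[:n]
			q.queues[tenant] = pending[n:]
			return batch
		}
	}
	return nil
}

func main() {
	q := newTenantQueues()
	for i := 0; i < 3; i++ {
		q.enqueue(task{tenant: "tenant-a", id: i})
		q.enqueue(task{tenant: "tenant-b", id: i})
	}
	for batch := q.dequeueBatch(2); batch != nil; batch = q.dequeueBatch(2) {
		fmt.Println("processing batch:", batch)
	}
}
```

Dequeuing a batch per tenant is what enables the worker-side multiplexing described above.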
This PR re-uses the request queue implementation used in the query scheduler. To do so, it moves the queue related code from the scheduler into a separate package `pkg/queue` and renames occurrences of "querier" to "consumer" to be more generic. The bloom gateway instantiates the request queue when starting the service. The gRPC method `FilterChunkRefs` then enqueues incoming requests to that queue. **Special notes for your reviewer**: For testing purposes, this PR also contains a dummy implementation of the workers. The worker implementation - which includes multiplexing of multiple tasks - is subject to a separate PR. --------- Signed-off-by: Christian Haudum --- go.mod | 2 +- pkg/bloomgateway/bloomgateway.go | 261 ++++++++++++++++-- pkg/bloomgateway/bloomgateway_test.go | 53 +++- pkg/lokifrontend/frontend/v1/frontend.go | 12 +- pkg/lokifrontend/frontend/v1/frontend_test.go | 4 +- pkg/{scheduler => }/queue/dequeue_qos_test.go | 12 +- pkg/{scheduler => }/queue/mapping.go | 0 pkg/{scheduler => }/queue/mapping_test.go | 0 pkg/{scheduler => }/queue/metrics.go | 0 pkg/{scheduler => }/queue/queue.go | 42 +-- pkg/{scheduler => }/queue/queue_test.go | 12 +- pkg/{scheduler => }/queue/tenant_queues.go | 144 +++++----- .../queue/tenant_queues_test.go | 108 ++++---- pkg/{scheduler => }/queue/treequeue.go | 0 pkg/{scheduler => }/queue/treequeue_test.go | 0 pkg/scheduler/scheduler.go | 10 +- pkg/util/active_user.go | 10 + 17 files changed, 479 insertions(+), 191 deletions(-) rename pkg/{scheduler => }/queue/dequeue_qos_test.go (93%) rename pkg/{scheduler => }/queue/mapping.go (100%) rename pkg/{scheduler => }/queue/mapping_test.go (100%) rename pkg/{scheduler => }/queue/metrics.go (100%) rename pkg/{scheduler => }/queue/queue.go (86%) rename pkg/{scheduler => }/queue/queue_test.go (96%) rename pkg/{scheduler => }/queue/tenant_queues.go (50%) rename pkg/{scheduler => }/queue/tenant_queues_test.go (81%) rename pkg/{scheduler => }/queue/treequeue.go (100%) rename pkg/{scheduler => }/queue/treequeue_test.go (100%) diff --git a/go.mod b/go.mod index 0a3b7f5343afd..432cd5b97c02b 100644 --- a/go.mod +++ b/go.mod @@ -74,7 +74,7 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/ncw/swift v1.0.53 github.com/oklog/run v1.1.0 - github.com/oklog/ulid v1.3.1 // indirect + github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing/opentracing-go v1.2.0 diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index 44eae46e4b84f..2b920e270b694 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -39,27 +39,130 @@ package bloomgateway import ( "context" + "fmt" "sort" + "sync" + "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/grafana/dskit/services" "github.com/grafana/dskit/tenant" + "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/queue" "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" + "github.com/grafana/loki/pkg/util" ) var 
errGatewayUnhealthy = errors.New("bloom-gateway is unhealthy in the ring")
 var errInvalidTenant = errors.New("invalid tenant in chunk refs")
 
-type metrics struct{}
+// TODO(chaudum): Make these configurable
+const (
+	numWorkers             = 4
+	maxTasksPerTenant      = 1024
+	pendingTasksInitialCap = 1024
+)
 
-func newMetrics(r prometheus.Registerer) *metrics {
-	return &metrics{}
+type metrics struct {
+	queueDuration    prometheus.Histogram
+	inflightRequests prometheus.Summary
+}
+
+func newMetrics(subsystem string, registerer prometheus.Registerer) *metrics {
+	return &metrics{
+		queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
+			Namespace: "loki",
+			Subsystem: subsystem,
+			Name:      "queue_duration_seconds",
+			Help:      "Time spent by tasks in queue before getting picked up by a worker.",
+			Buckets:   prometheus.DefBuckets,
+		}),
+		inflightRequests: promauto.With(registerer).NewSummary(prometheus.SummaryOpts{
+			Namespace:  "loki",
+			Subsystem:  subsystem,
+			Name:       "inflight_tasks",
+			Help:       "Number of inflight tasks (either queued or processing) sampled at a regular interval. Quantile buckets keep track of inflight tasks over the last 60s.",
+			Objectives: map[float64]float64{0.5: 0.05, 0.75: 0.02, 0.8: 0.02, 0.9: 0.01, 0.95: 0.01, 0.99: 0.001},
+			MaxAge:     time.Minute,
+			AgeBuckets: 6,
+		}),
+	}
+}
+
+// Task is the data structure that is enqueued to the internal queue and dequeued by query workers
+type Task struct {
+	// ID is a lexicographically sortable unique identifier of the task
+	ID ulid.ULID
+	// Tenant is the tenant ID
+	Tenant string
+	// Request is the original request
+	Request *logproto.FilterChunkRefRequest
+	// ErrCh is a send-only channel to write an error to
+	ErrCh chan<- error
+	// ResCh is a send-only channel to write partial responses to
+	ResCh chan<- *logproto.GroupedChunkRefs
+}
+
+// newTask returns a new Task that can be enqueued to the task queue.
+// As additional arguments, it returns a result and an error channel, as well
+// as an error if the instantiation fails.
+func newTask(tenantID string, req *logproto.FilterChunkRefRequest) (Task, chan *logproto.GroupedChunkRefs, chan error, error) {
+	key, err := ulid.New(ulid.Now(), nil)
+	if err != nil {
+		return Task{}, nil, nil, err
+	}
+	errCh := make(chan error, 1)
+	resCh := make(chan *logproto.GroupedChunkRefs, 1)
+	task := Task{
+		ID:      key,
+		Tenant:  tenantID,
+		Request: req,
+		ErrCh:   errCh,
+		ResCh:   resCh,
+	}
+	return task, resCh, errCh, nil
+}
+
+// SyncMap is a map structure which can be synchronized using the RWMutex
+type SyncMap[k comparable, v any] struct {
+	sync.RWMutex
+	Map map[k]v
+}
+
+type pendingTasks SyncMap[ulid.ULID, Task]
+
+func (t *pendingTasks) Len() int {
+	t.RLock()
+	defer t.RUnlock()
+	return len(t.Map)
+}
+
+func (t *pendingTasks) Add(k ulid.ULID, v Task) {
+	t.Lock()
+	t.Map[k] = v
+	t.Unlock()
+}
+
+func (t *pendingTasks) Delete(k ulid.ULID) {
+	t.Lock()
+	delete(t.Map, k)
+	t.Unlock()
+}
+
+// makePendingTasks creates a SyncMap that holds pending tasks
+func makePendingTasks(n int) *pendingTasks {
+	return &pendingTasks{
+		RWMutex: sync.RWMutex{},
+		Map:     make(map[ulid.ULID]Task, n),
+	}
 }
 
 type Gateway struct {
@@ -69,20 +172,33 @@ type Gateway struct {
 	logger  log.Logger
 	metrics *metrics
 
-	bloomStore bloomshipper.Store
+	queue        *queue.RequestQueue
+	queueMetrics *queue.Metrics
+	activeUsers  *util.ActiveUsersCleanupService
+	bloomStore   bloomshipper.Store
 
 	sharding ShardingStrategy
+
+	pendingTasks *pendingTasks
+
+	serviceMngr    *services.Manager
+	serviceWatcher *services.FailureWatcher
 }
 
 // New returns a new instance of the Bloom Gateway.
 func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, shardingStrategy ShardingStrategy, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
 	g := &Gateway{
-		cfg:      cfg,
-		logger:   logger,
-		metrics:  newMetrics(reg),
-		sharding: shardingStrategy,
+		cfg:          cfg,
+		logger:       logger,
+		metrics:      newMetrics("bloom_gateway", reg),
+		sharding:     shardingStrategy,
+		pendingTasks: makePendingTasks(pendingTasksInitialCap),
 	}
 
+	g.queueMetrics = queue.NewMetrics("bloom_gateway", reg)
+	g.queue = queue.NewRequestQueue(maxTasksPerTenant, time.Minute, g.queueMetrics)
+	g.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(g.queueMetrics.Cleanup)
+
 	client, err := bloomshipper.NewBloomClient(schemaCfg.Configs, storageCfg, cm)
 	if err != nil {
 		return nil, err
@@ -99,18 +215,112 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, s
 	}
 
 	g.bloomStore = bloomStore
-	g.Service = services.NewIdleService(g.starting, g.stopping)
+
+	svcs := []services.Service{g.queue, g.activeUsers}
+	g.serviceMngr, err = services.NewManager(svcs...)
+	if err != nil {
+		return nil, err
+	}
+	g.serviceWatcher = services.NewFailureWatcher()
+	g.serviceWatcher.WatchManager(g.serviceMngr)
+
+	g.Service = services.NewBasicService(g.starting, g.running, g.stopping).WithName("bloom-gateway")
 
 	return g, nil
 }
 
 func (g *Gateway) starting(ctx context.Context) error {
+	var err error
+	defer func() {
+		if err == nil || g.serviceMngr == nil {
+			return
+		}
+		if err := services.StopManagerAndAwaitStopped(context.Background(), g.serviceMngr); err != nil {
+			level.Error(g.logger).Log("msg", "failed to gracefully stop bloom gateway dependencies", "err", err)
+		}
+	}()
+
+	if err = services.StartManagerAndAwaitHealthy(ctx, g.serviceMngr); err != nil {
+		return errors.Wrap(err, "unable to start bloom gateway subservices")
+	}
+
+	for i := 0; i < numWorkers; i++ {
+		go g.startWorker(ctx, fmt.Sprintf("worker-%d", i))
+	}
+
 	return nil
 }
 
+func (g *Gateway) running(ctx context.Context) error {
+	// We observe inflight tasks frequently and at regular intervals, to have a good
+	// approximation of max inflight tasks over percentiles of time. We also do it with
+	// a ticker so that we keep tracking it even if we have no new requests but stuck inflight
+	// tasks (e.g. workers are all exhausted).
+	inflightTasksTicker := time.NewTicker(250 * time.Millisecond)
+	defer inflightTasksTicker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case err := <-g.serviceWatcher.Chan():
+			return errors.Wrap(err, "bloom gateway subservice failed")
+		case <-inflightTasksTicker.C:
+			inflight := g.pendingTasks.Len()
+			g.metrics.inflightRequests.Observe(float64(inflight))
+		}
+	}
+}
+
 func (g *Gateway) stopping(_ error) error {
 	g.bloomStore.Stop()
-	return nil
+	return services.StopManagerAndAwaitStopped(context.Background(), g.serviceMngr)
+}
+
+// This is just a dummy implementation of the worker!
+// TODO(chaudum): Implement worker that dequeues multiple pending tasks and
+// multiplexes them prior to execution.
+func (g *Gateway) startWorker(_ context.Context, id string) error {
+	level.Info(g.logger).Log("msg", "starting worker", "worker", id)
+
+	g.queue.RegisterConsumerConnection(id)
+	defer g.queue.UnregisterConsumerConnection(id)
+
+	idx := queue.StartIndexWithLocalQueue
+
+	for {
+		ctx := context.Background()
+		item, newIdx, err := g.queue.Dequeue(ctx, idx, id)
+		if err != nil {
+			if err != queue.ErrStopped {
+				level.Error(g.logger).Log("msg", "failed to dequeue task", "worker", id, "err", err)
+				continue
+			}
+			level.Info(g.logger).Log("msg", "stopping worker", "worker", id)
+			return err
+		}
+		task, ok := item.(Task)
+		if !ok {
+			level.Error(g.logger).Log("msg", "failed to cast to Task", "item", item)
+			continue
+		}
+
+		idx = newIdx
+		level.Info(g.logger).Log("msg", "dequeued task", "worker", id, "task", task.ID)
+		g.pendingTasks.Delete(task.ID)
+
+		r := task.Request
+		if len(r.Filters) > 0 {
+			r.Refs, err = g.bloomStore.FilterChunkRefs(ctx, task.Tenant, r.From.Time(), r.Through.Time(), r.Refs, r.Filters...)
+ } + if err != nil { + task.ErrCh <- err + } else { + for _, ref := range r.Refs { + task.ResCh <- ref + } + } + } } // FilterChunkRefs implements BloomGatewayServer @@ -131,15 +341,32 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk return req.Refs[i].Fingerprint < req.Refs[j].Fingerprint }) - chunkRefs := req.Refs + task, resCh, errCh, err := newTask(tenantID, req) + if err != nil { + return nil, err + } + + g.activeUsers.UpdateUserTimestamp(tenantID, time.Now()) + level.Info(g.logger).Log("msg", "enqueue task", "task", task.ID) + g.queue.Enqueue(tenantID, []string{}, task, 100, func() { + // When enqueuing, we also add the task to the pending tasks + g.pendingTasks.Add(task.ID, task) + }) - // Only query bloom filters if filters are present - if len(req.Filters) > 0 { - chunkRefs, err = g.bloomStore.FilterChunkRefs(ctx, tenantID, req.From.Time(), req.Through.Time(), req.Refs, req.Filters...) - if err != nil { + response := make([]*logproto.GroupedChunkRefs, 0, len(req.Refs)) + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-errCh: return nil, err + case res := <-resCh: + level.Info(g.logger).Log("msg", "got result", "task", task.ID, "tenant", tenantID, "res", res) + // wait for all parts of the full response + response = append(response, res) + if len(response) == len(req.Refs) { + return &logproto.FilterChunkRefResponse{ChunkRefs: response}, nil + } } } - - return &logproto.FilterChunkRefResponse{ChunkRefs: chunkRefs}, nil } diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go index 285d11aaf4bf3..bfb2b9b9d8e21 100644 --- a/pkg/bloomgateway/bloomgateway_test.go +++ b/pkg/bloomgateway/bloomgateway_test.go @@ -2,6 +2,7 @@ package bloomgateway import ( "context" + "os" "testing" "time" @@ -87,6 +88,10 @@ func TestBloomGateway_StartStopService(t *testing.T) { err = services.StartAndAwaitRunning(context.Background(), gw) require.NoError(t, err) + // Wait for workers to connect to queue + time.Sleep(50 * time.Millisecond) + require.Equal(t, float64(numWorkers), gw.queue.GetConnectedConsumersMetric()) + err = services.StopAndAwaitTerminated(context.Background(), gw) require.NoError(t, err) }) @@ -96,7 +101,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { tenantID := "test" ss := NewNoopStrategy() - logger := log.NewNopLogger() + logger := log.NewLogfmtLogger(os.Stderr) reg := prometheus.NewRegistry() cm := storage.NewClientMetrics() @@ -136,9 +141,17 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { } t.Run("returns unfiltered chunk refs if no filters provided", func(t *testing.T) { + reg := prometheus.NewRegistry() gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg) require.NoError(t, err) + err = services.StartAndAwaitRunning(context.Background(), gw) + require.NoError(t, err) + t.Cleanup(func() { + err = services.StopAndAwaitTerminated(context.Background(), gw) + require.NoError(t, err) + }) + ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00") now := model.TimeFromUnix(ts.Unix()) @@ -174,6 +187,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { }) t.Run("returns error if chunk refs do not belong to tenant", func(t *testing.T) { + reg := prometheus.NewRegistry() gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg) require.NoError(t, err) @@ -196,4 +210,41 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { require.Equal(t, "expected chunk refs from tenant test, got tenant other: invalid tenant in chunk refs", err.Error()) }) + 
t.Run("gateway tracks active users", func(t *testing.T) { + reg := prometheus.NewRegistry() + gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg) + require.NoError(t, err) + + err = services.StartAndAwaitRunning(context.Background(), gw) + require.NoError(t, err) + t.Cleanup(func() { + err = services.StopAndAwaitTerminated(context.Background(), gw) + require.NoError(t, err) + }) + + ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00") + now := model.TimeFromUnix(ts.Unix()) + + tenants := []string{"tenant-a", "tenant-b", "tenant-c"} + for idx, tenantID := range tenants { + chunkRefs := []*logproto.ChunkRef{ + { + Fingerprint: uint64(1000 + 100*idx), + UserID: tenantID, + From: now.Add(-24 * time.Hour), + Through: now, + Checksum: uint32(idx), + }, + } + req := &logproto.FilterChunkRefRequest{ + From: now.Add(-24 * time.Hour), + Through: now, + Refs: groupRefs(t, chunkRefs), + } + ctx := user.InjectOrgID(context.Background(), tenantID) + _, err = gw.FilterChunkRefs(ctx, req) + require.NoError(t, err) + } + require.Equal(t, tenants, gw.activeUsers.ActiveUsers()) + }) } diff --git a/pkg/lokifrontend/frontend/v1/frontend.go b/pkg/lokifrontend/frontend/v1/frontend.go index 320e3dd50f437..836baf283a37e 100644 --- a/pkg/lokifrontend/frontend/v1/frontend.go +++ b/pkg/lokifrontend/frontend/v1/frontend.go @@ -20,7 +20,7 @@ import ( "github.com/grafana/loki/pkg/lokifrontend/frontend/v1/frontendv1pb" "github.com/grafana/loki/pkg/querier/stats" - "github.com/grafana/loki/pkg/scheduler/queue" + "github.com/grafana/loki/pkg/queue" "github.com/grafana/loki/pkg/util" lokigrpc "github.com/grafana/loki/pkg/util/httpgrpc" "github.com/grafana/loki/pkg/util/validation" @@ -106,7 +106,7 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist f.numClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ Name: "cortex_query_frontend_connected_clients", Help: "Number of worker clients currently connected to the frontend.", - }, f.requestQueue.GetConnectedQuerierWorkersMetric) + }, f.requestQueue.GetConnectedConsumersMetric) f.Service = services.NewBasicService(f.starting, f.running, f.stopping) return f, nil @@ -189,8 +189,8 @@ func (f *Frontend) Process(server frontendv1pb.Frontend_ProcessServer) error { return err } - f.requestQueue.RegisterQuerierConnection(querierID) - defer f.requestQueue.UnregisterQuerierConnection(querierID) + f.requestQueue.RegisterConsumerConnection(querierID) + defer f.requestQueue.UnregisterConsumerConnection(querierID) lastIndex := queue.StartIndex @@ -273,7 +273,7 @@ func (f *Frontend) Process(server frontendv1pb.Frontend_ProcessServer) error { func (f *Frontend) NotifyClientShutdown(_ context.Context, req *frontendv1pb.NotifyClientShutdownRequest) (*frontendv1pb.NotifyClientShutdownResponse, error) { level.Info(f.log).Log("msg", "received shutdown notification from querier", "querier", req.GetClientID()) - f.requestQueue.NotifyQuerierShutdown(req.GetClientID()) + f.requestQueue.NotifyConsumerShutdown(req.GetClientID()) return &frontendv1pb.NotifyClientShutdownResponse{}, nil } @@ -327,7 +327,7 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error { // chosen to match the same method in the ingester func (f *Frontend) CheckReady(_ context.Context) error { // if we have more than one querier connected we will consider ourselves ready - connectedClients := f.requestQueue.GetConnectedQuerierWorkersMetric() + connectedClients := 
f.requestQueue.GetConnectedConsumersMetric() if connectedClients > 0 { return nil } diff --git a/pkg/lokifrontend/frontend/v1/frontend_test.go b/pkg/lokifrontend/frontend/v1/frontend_test.go index 82422b79b5d43..cbe34776e6ecf 100644 --- a/pkg/lokifrontend/frontend/v1/frontend_test.go +++ b/pkg/lokifrontend/frontend/v1/frontend_test.go @@ -33,7 +33,7 @@ import ( "github.com/grafana/loki/pkg/querier/queryrange" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" querier_worker "github.com/grafana/loki/pkg/querier/worker" - "github.com/grafana/loki/pkg/scheduler/queue" + "github.com/grafana/loki/pkg/queue" ) const ( @@ -136,7 +136,7 @@ func TestFrontendCheckReady(t *testing.T) { requestQueue: queue.NewRequestQueue(5, 0, qm), } for i := 0; i < tt.connectedClients; i++ { - f.requestQueue.RegisterQuerierConnection("test") + f.requestQueue.RegisterConsumerConnection("test") } err := f.CheckReady(context.Background()) errMsg := "" diff --git a/pkg/scheduler/queue/dequeue_qos_test.go b/pkg/queue/dequeue_qos_test.go similarity index 93% rename from pkg/scheduler/queue/dequeue_qos_test.go rename to pkg/queue/dequeue_qos_test.go index 82dc3f66a2079..0709f4723dfb6 100644 --- a/pkg/scheduler/queue/dequeue_qos_test.go +++ b/pkg/queue/dequeue_qos_test.go @@ -60,7 +60,7 @@ func BenchmarkQueryFairness(t *testing.B) { enqueueRequestsForActor(t, []string{}, useActor, requestQueue, numSubRequestsActorA, 50*time.Millisecond) enqueueRequestsForActor(t, []string{"a"}, useActor, requestQueue, numSubRequestsActorA, 100*time.Millisecond) enqueueRequestsForActor(t, []string{"b"}, useActor, requestQueue, numSubRequestsActorB, 50*time.Millisecond) - requestQueue.queues.recomputeUserQueriers() + requestQueue.queues.recomputeUserConsumers() // set timeout to minize impact on overall test run duration in case something goes wrong ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -76,8 +76,8 @@ func BenchmarkQueryFairness(t *testing.B) { go func(id string) { defer wg.Done() - requestQueue.RegisterQuerierConnection(id) - defer requestQueue.UnregisterQuerierConnection(id) + requestQueue.RegisterConsumerConnection(id) + defer requestQueue.UnregisterConsumerConnection(id) idx := StartIndex for ctx.Err() == nil { r, newIdx, err := requestQueue.Dequeue(ctx, idx, id) @@ -143,7 +143,7 @@ func TestQueryFairnessAcrossSameLevel(t *testing.T) { _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(22), 0, nil) _ = requestQueue.Enqueue("tenant1", []string{"xyz", "123"}, r(200), 0, nil) _ = requestQueue.Enqueue("tenant1", []string{"xyz", "456"}, r(210), 0, nil) - requestQueue.queues.recomputeUserQueriers() + requestQueue.queues.recomputeUserConsumers() items := make([]int, 0) @@ -151,8 +151,8 @@ func TestQueryFairnessAcrossSameLevel(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() - requestQueue.RegisterQuerierConnection("querier") - defer requestQueue.UnregisterQuerierConnection("querier") + requestQueue.RegisterConsumerConnection("querier") + defer requestQueue.UnregisterConsumerConnection("querier") idx := StartIndexWithLocalQueue for ctx.Err() == nil { diff --git a/pkg/scheduler/queue/mapping.go b/pkg/queue/mapping.go similarity index 100% rename from pkg/scheduler/queue/mapping.go rename to pkg/queue/mapping.go diff --git a/pkg/scheduler/queue/mapping_test.go b/pkg/queue/mapping_test.go similarity index 100% rename from pkg/scheduler/queue/mapping_test.go rename to pkg/queue/mapping_test.go diff --git 
a/pkg/scheduler/queue/metrics.go b/pkg/queue/metrics.go similarity index 100% rename from pkg/scheduler/queue/metrics.go rename to pkg/queue/metrics.go diff --git a/pkg/scheduler/queue/queue.go b/pkg/queue/queue.go similarity index 86% rename from pkg/scheduler/queue/queue.go rename to pkg/queue/queue.go index cb25388a0c5da..fa1860e4e88d3 100644 --- a/pkg/scheduler/queue/queue.go +++ b/pkg/queue/queue.go @@ -51,7 +51,7 @@ type RequestChannel chan Request type RequestQueue struct { services.Service - connectedQuerierWorkers *atomic.Int32 + connectedConsumers *atomic.Int32 mtx sync.Mutex cond contextCond // Notified when request is enqueued or dequeued, or querier is disconnected. @@ -63,13 +63,13 @@ type RequestQueue struct { func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, metrics *Metrics) *RequestQueue { q := &RequestQueue{ - queues: newTenantQueues(maxOutstandingPerTenant, forgetDelay), - connectedQuerierWorkers: atomic.NewInt32(0), - metrics: metrics, + queues: newTenantQueues(maxOutstandingPerTenant, forgetDelay), + connectedConsumers: atomic.NewInt32(0), + metrics: metrics, } q.cond = contextCond{Cond: sync.NewCond(&q.mtx)} - q.Service = services.NewTimerService(forgetCheckPeriod, nil, q.forgetDisconnectedQueriers, q.stopping).WithName("request queue") + q.Service = services.NewTimerService(forgetCheckPeriod, nil, q.forgetDisconnectedConsumers, q.stopping).WithName("request queue") return q } @@ -127,8 +127,8 @@ func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, maxQue // Dequeue find next tenant queue and takes the next request off of it. Will block if there are no requests. // By passing tenant index from previous call of this method, querier guarantees that it iterates over all tenants fairly. -// If querier finds that request from the tenant is already expired, it can get a request for the same tenant by using UserIndex.ReuseLastUser. -func (q *RequestQueue) Dequeue(ctx context.Context, last QueueIndex, querierID string) (Request, QueueIndex, error) { +// If consumer finds that request from the tenant is already expired, it can get a request for the same tenant by using UserIndex.ReuseLastUser. +func (q *RequestQueue) Dequeue(ctx context.Context, last QueueIndex, consumerID string) (Request, QueueIndex, error) { q.mtx.Lock() defer q.mtx.Unlock() @@ -140,7 +140,7 @@ FindQueue: querierWait = false start := time.Now() q.cond.Wait(ctx) - q.metrics.querierWaitTime.WithLabelValues(querierID).Observe(time.Since(start).Seconds()) + q.metrics.querierWaitTime.WithLabelValues(consumerID).Observe(time.Since(start).Seconds()) } if q.stopped { @@ -152,7 +152,7 @@ FindQueue: } for { - queue, tenant, idx := q.queues.getNextQueueForQuerier(last, querierID) + queue, tenant, idx := q.queues.getNextQueueForConsumer(last, consumerID) last = idx if queue == nil { break @@ -181,11 +181,11 @@ FindQueue: goto FindQueue } -func (q *RequestQueue) forgetDisconnectedQueriers(_ context.Context) error { +func (q *RequestQueue) forgetDisconnectedConsumers(_ context.Context) error { q.mtx.Lock() defer q.mtx.Unlock() - if q.queues.forgetDisconnectedQueriers(time.Now()) > 0 { + if q.queues.forgetDisconnectedConsumers(time.Now()) > 0 { // We need to notify goroutines cause having removed some queriers // may have caused a resharding. 
q.cond.Broadcast() @@ -198,7 +198,7 @@ func (q *RequestQueue) stopping(_ error) error { q.mtx.Lock() defer q.mtx.Unlock() - for !q.queues.hasNoTenantQueues() && q.connectedQuerierWorkers.Load() > 0 { + for !q.queues.hasNoTenantQueues() && q.connectedConsumers.Load() > 0 { q.cond.Wait(context.Background()) } @@ -211,30 +211,30 @@ func (q *RequestQueue) stopping(_ error) error { return nil } -func (q *RequestQueue) RegisterQuerierConnection(querier string) { - q.connectedQuerierWorkers.Inc() +func (q *RequestQueue) RegisterConsumerConnection(querier string) { + q.connectedConsumers.Inc() q.mtx.Lock() defer q.mtx.Unlock() - q.queues.addQuerierConnection(querier) + q.queues.addConsumerToConnection(querier) } -func (q *RequestQueue) UnregisterQuerierConnection(querier string) { - q.connectedQuerierWorkers.Dec() +func (q *RequestQueue) UnregisterConsumerConnection(querier string) { + q.connectedConsumers.Dec() q.mtx.Lock() defer q.mtx.Unlock() - q.queues.removeQuerierConnection(querier, time.Now()) + q.queues.removeConsumerConnection(querier, time.Now()) } -func (q *RequestQueue) NotifyQuerierShutdown(querierID string) { +func (q *RequestQueue) NotifyConsumerShutdown(querierID string) { q.mtx.Lock() defer q.mtx.Unlock() q.queues.notifyQuerierShutdown(querierID) } -func (q *RequestQueue) GetConnectedQuerierWorkersMetric() float64 { - return float64(q.connectedQuerierWorkers.Load()) +func (q *RequestQueue) GetConnectedConsumersMetric() float64 { + return float64(q.connectedConsumers.Load()) } // contextCond is a *sync.Cond with Wait() method overridden to support context-based waiting. diff --git a/pkg/scheduler/queue/queue_test.go b/pkg/queue/queue_test.go similarity index 96% rename from pkg/scheduler/queue/queue_test.go rename to pkg/queue/queue_test.go index 86adbbfe53a1c..fe8d1a0a6a3eb 100644 --- a/pkg/scheduler/queue/queue_test.go +++ b/pkg/queue/queue_test.go @@ -49,7 +49,7 @@ func BenchmarkGetNextRequest(b *testing.B) { queues = append(queues, queue) for ix := 0; ix < queriers; ix++ { - queue.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) + queue.RegisterConsumerConnection(fmt.Sprintf("querier-%d", ix)) } for i := 0; i < maxOutstandingPerTenant; i++ { @@ -106,7 +106,7 @@ func BenchmarkQueueRequest(b *testing.B) { q := NewRequestQueue(maxOutstandingPerTenant, 0, NewMetrics("query_scheduler", nil)) for ix := 0; ix < queriers; ix++ { - q.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) + q.RegisterConsumerConnection(fmt.Sprintf("querier-%d", ix)) } queues = append(queues, q) @@ -143,8 +143,8 @@ func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBe }) // Two queriers connect. - queue.RegisterQuerierConnection("querier-1") - queue.RegisterQuerierConnection("querier-2") + queue.RegisterConsumerConnection("querier-1") + queue.RegisterConsumerConnection("querier-2") // Querier-2 waits for a new request. querier2wg := sync.WaitGroup{} @@ -156,7 +156,7 @@ func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBe }() // Querier-1 crashes (no graceful shutdown notification). - queue.UnregisterQuerierConnection("querier-1") + queue.UnregisterConsumerConnection("querier-1") // Enqueue a request from an user which would be assigned to querier-1. // NOTE: "user-1" hash falls in the querier-1 shard. 
@@ -305,7 +305,7 @@ func TestMaxQueueSize(t *testing.T) { t.Run("queue size is tracked per tenant", func(t *testing.T) { maxSize := 3 queue := NewRequestQueue(maxSize, 0, NewMetrics("query_scheduler", nil)) - queue.RegisterQuerierConnection("querier") + queue.RegisterConsumerConnection("querier") // enqueue maxSize items with different actors // different actors have individual channels with maxSize length diff --git a/pkg/scheduler/queue/tenant_queues.go b/pkg/queue/tenant_queues.go similarity index 50% rename from pkg/scheduler/queue/tenant_queues.go rename to pkg/queue/tenant_queues.go index 9a7b42cbfdef7..46e8a999fb88e 100644 --- a/pkg/scheduler/queue/tenant_queues.go +++ b/pkg/queue/tenant_queues.go @@ -38,35 +38,35 @@ func (tqs intPointerMap) Dec(key string) int { return *ptr } -// querier holds information about a querier registered in the queue. -type querier struct { +// consumer holds information about a consumer registered in the queue. +type consumer struct { // Number of active connections. connections int - // True if the querier notified it's gracefully shutting down. + // True if the consumer notified it's gracefully shutting down. shuttingDown bool // When the last connection has been unregistered. disconnectedAt time.Time } -// This struct holds tenant queues for pending requests. It also keeps track of connected queriers, -// and mapping between tenants and queriers. +// This struct holds tenant queues for pending requests. It also keeps track of connected consumers, +// and mapping between tenants and consumers. type tenantQueues struct { mapping *Mapping[*tenantQueue] maxUserQueueSize int perUserQueueLen intPointerMap - // How long to wait before removing a querier which has got disconnected + // How long to wait before removing a consumer which has got disconnected // but hasn't notified about a graceful shutdown. forgetDelay time.Duration - // Tracks queriers registered to the queue. - queriers map[string]*querier + // Tracks consumers registered to the queue. + consumers map[string]*consumer - // Sorted list of querier names, used when creating per-user shard. - sortedQueriers []string + // sortedConsumer list of consumer IDs, used when creating per-user shard. + sortedConsumers []string } type Queue interface { @@ -86,12 +86,12 @@ type Mapable interface { type tenantQueue struct { *TreeQueue - // If not nil, only these queriers can handle user requests. If nil, all queriers can. - // We set this to nil if number of available queriers <= maxQueriers. - queriers map[string]struct{} + // If not nil, only these consumers can handle user requests. If nil, all consumers can. + // We set this to nil if number of available consumers <= maxQueriers. + consumers map[string]struct{} maxQueriers int - // Seed for shuffle sharding of queriers. This seed is based on userID only and is therefore consistent + // Seed for shuffle sharding of consumers. This seed is based on userID only and is therefore consistent // between different frontends. seed int64 } @@ -104,8 +104,8 @@ func newTenantQueues(maxUserQueueSize int, forgetDelay time.Duration) *tenantQue maxUserQueueSize: maxUserQueueSize, perUserQueueLen: make(intPointerMap), forgetDelay: forgetDelay, - queriers: map[string]*querier{}, - sortedQueriers: nil, + consumers: map[string]*consumer{}, + sortedConsumers: nil, } } @@ -118,9 +118,9 @@ func (q *tenantQueues) deleteQueue(tenant string) { } // Returns existing or new queue for a tenant. -// MaxQueriers is used to compute which queriers should handle requests for this tenant. 
-// If maxQueriers is <= 0, all queriers can handle this tenant's requests. -// If maxQueriers has changed since the last call, queriers for this are recomputed. +// MaxQueriers is used to compute which consumers should handle requests for this tenant. +// If maxQueriers is <= 0, all consumers can handle this tenant's requests. +// If maxQueriers has changed since the last call, consumers for this are recomputed. func (q *tenantQueues) getOrAddQueue(tenant string, path []string, maxQueriers int) Queue { // Empty tenant is not allowed, as that would break our tenants list ("" is used for free spot). if tenant == "" { @@ -142,7 +142,7 @@ func (q *tenantQueues) getOrAddQueue(tenant string, path []string, maxQueriers i if uq.maxQueriers != maxQueriers { uq.maxQueriers = maxQueriers - uq.queriers = shuffleQueriersForTenants(uq.seed, maxQueriers, q.sortedQueriers, nil) + uq.consumers = shuffleConsumersForTenants(uq.seed, maxQueriers, q.sortedConsumers, nil) } if len(path) == 0 { @@ -151,10 +151,10 @@ func (q *tenantQueues) getOrAddQueue(tenant string, path []string, maxQueriers i return uq.add(path) } -// Finds next queue for the querier. To support fair scheduling between users, client is expected +// Finds next queue for the consumer. To support fair scheduling between users, client is expected // to pass last user index returned by this function as argument. Is there was no previous // last user index, use -1. -func (q *tenantQueues) getNextQueueForQuerier(lastUserIndex QueueIndex, querierID string) (Queue, string, QueueIndex) { +func (q *tenantQueues) getNextQueueForConsumer(lastUserIndex QueueIndex, consumerID string) (Queue, string, QueueIndex) { uid := lastUserIndex // at the RequestQueue level we don't have local queues, so start index is -1 @@ -162,9 +162,9 @@ func (q *tenantQueues) getNextQueueForQuerier(lastUserIndex QueueIndex, querierI uid = StartIndex } - // Ensure the querier is not shutting down. If the querier is shutting down, we shouldn't forward + // Ensure the consumer is not shutting down. If the consumer is shutting down, we shouldn't forward // any more queries to it. - if info := q.queriers[querierID]; info == nil || info.shuttingDown { + if info := q.consumers[consumerID]; info == nil || info.shuttingDown { return nil, "", uid } @@ -180,9 +180,9 @@ func (q *tenantQueues) getNextQueueForQuerier(lastUserIndex QueueIndex, querierI } uid = tq.pos - if tq.queriers != nil { - if _, ok := tq.queriers[querierID]; !ok { - // This querier is not handling the user. + if tq.consumers != nil { + if _, ok := tq.consumers[consumerID]; !ok { + // This consumer is not handling the user. continue } } @@ -192,30 +192,30 @@ func (q *tenantQueues) getNextQueueForQuerier(lastUserIndex QueueIndex, querierI return nil, "", uid } -func (q *tenantQueues) addQuerierConnection(querierID string) { - info := q.queriers[querierID] +func (q *tenantQueues) addConsumerToConnection(consumerID string) { + info := q.consumers[consumerID] if info != nil { info.connections++ - // Reset in case the querier re-connected while it was in the forget waiting period. + // Reset in case the consumer re-connected while it was in the forget waiting period. info.shuttingDown = false info.disconnectedAt = time.Time{} return } - // First connection from this querier. - q.queriers[querierID] = &querier{connections: 1} - q.sortedQueriers = append(q.sortedQueriers, querierID) - sort.Strings(q.sortedQueriers) + // First connection from this consumer. 
+ q.consumers[consumerID] = &consumer{connections: 1} + q.sortedConsumers = append(q.sortedConsumers, consumerID) + sort.Strings(q.sortedConsumers) - q.recomputeUserQueriers() + q.recomputeUserConsumers() } -func (q *tenantQueues) removeQuerierConnection(querierID string, now time.Time) { - info := q.queriers[querierID] +func (q *tenantQueues) removeConsumerConnection(consumerID string, now time.Time) { + info := q.consumers[consumerID] if info == nil || info.connections <= 0 { - panic("unexpected number of connections for querier") + panic("unexpected number of connections for consumer") } // Decrease the number of active connections. @@ -225,65 +225,65 @@ func (q *tenantQueues) removeQuerierConnection(querierID string, now time.Time) } // There no more active connections. If the forget delay is configured then - // we can remove it only if querier has announced a graceful shutdown. + // we can remove it only if consumer has announced a graceful shutdown. if info.shuttingDown || q.forgetDelay == 0 { - q.removeQuerier(querierID) + q.removeConsumer(consumerID) return } // No graceful shutdown has been notified yet, so we should track the current time - // so that we'll remove the querier as soon as we receive the graceful shutdown + // so that we'll remove the consumer as soon as we receive the graceful shutdown // notification (if any) or once the threshold expires. info.disconnectedAt = now } -func (q *tenantQueues) removeQuerier(querierID string) { - delete(q.queriers, querierID) +func (q *tenantQueues) removeConsumer(consumerID string) { + delete(q.consumers, consumerID) - ix := sort.SearchStrings(q.sortedQueriers, querierID) - if ix >= len(q.sortedQueriers) || q.sortedQueriers[ix] != querierID { - panic("incorrect state of sorted queriers") + ix := sort.SearchStrings(q.sortedConsumers, consumerID) + if ix >= len(q.sortedConsumers) || q.sortedConsumers[ix] != consumerID { + panic("incorrect state of sorted consumers") } - q.sortedQueriers = append(q.sortedQueriers[:ix], q.sortedQueriers[ix+1:]...) + q.sortedConsumers = append(q.sortedConsumers[:ix], q.sortedConsumers[ix+1:]...) - q.recomputeUserQueriers() + q.recomputeUserConsumers() } -// notifyQuerierShutdown records that a querier has sent notification about a graceful shutdown. -func (q *tenantQueues) notifyQuerierShutdown(querierID string) { - info := q.queriers[querierID] +// notifyQuerierShutdown records that a consumer has sent notification about a graceful shutdown. +func (q *tenantQueues) notifyQuerierShutdown(consumerID string) { + info := q.consumers[consumerID] if info == nil { - // The querier may have already been removed, so we just ignore it. + // The consumer may have already been removed, so we just ignore it. return } - // If there are no more connections, we should remove the querier. + // If there are no more connections, we should remove the consumer. if info.connections == 0 { - q.removeQuerier(querierID) + q.removeConsumer(consumerID) return } // Otherwise we should annotate we received a graceful shutdown notification - // and the querier will be removed once all connections are unregistered. + // and the consumer will be removed once all connections are unregistered. info.shuttingDown = true } -// forgetDisconnectedQueriers removes all disconnected queriers that have gone since at least -// the forget delay. Returns the number of forgotten queriers. 
-func (q *tenantQueues) forgetDisconnectedQueriers(now time.Time) int { +// forgetDisconnectedConsumers removes all disconnected consumer that have gone since at least +// the forget delay. Returns the number of forgotten consumers. +func (q *tenantQueues) forgetDisconnectedConsumers(now time.Time) int { // Nothing to do if the forget delay is disabled. if q.forgetDelay == 0 { return 0 } - // Remove all queriers with no connections that have gone since at least the forget delay. + // Remove all consumers with no connections that have gone since at least the forget delay. threshold := now.Add(-q.forgetDelay) forgotten := 0 - for querierID := range q.queriers { - if info := q.queriers[querierID]; info.connections == 0 && info.disconnectedAt.Before(threshold) { - q.removeQuerier(querierID) + for id := range q.consumers { + if info := q.consumers[id]; info.connections == 0 && info.disconnectedAt.Before(threshold) { + q.removeConsumer(id) forgotten++ } } @@ -291,30 +291,30 @@ func (q *tenantQueues) forgetDisconnectedQueriers(now time.Time) int { return forgotten } -func (q *tenantQueues) recomputeUserQueriers() { - scratchpad := make([]string, 0, len(q.sortedQueriers)) +func (q *tenantQueues) recomputeUserConsumers() { + scratchpad := make([]string, 0, len(q.sortedConsumers)) for _, uq := range q.mapping.Values() { - uq.queriers = shuffleQueriersForTenants(uq.seed, uq.maxQueriers, q.sortedQueriers, scratchpad) + uq.consumers = shuffleConsumersForTenants(uq.seed, uq.maxQueriers, q.sortedConsumers, scratchpad) } } -// shuffleQueriersForTenants returns nil if queriersToSelect is 0 or there are not enough queriers to select from. -// In that case *all* queriers should be used. +// shuffleConsumersForTenants returns nil if consumersToSelect is 0 or there are not enough consumers to select from. +// In that case *all* consumers should be used. // Scratchpad is used for shuffling, to avoid new allocations. If nil, new slice is allocated. -func shuffleQueriersForTenants(userSeed int64, queriersToSelect int, allSortedQueriers []string, scratchpad []string) map[string]struct{} { - if queriersToSelect == 0 || len(allSortedQueriers) <= queriersToSelect { +func shuffleConsumersForTenants(userSeed int64, consumersToSelect int, allSortedConsumers []string, scratchpad []string) map[string]struct{} { + if consumersToSelect == 0 || len(allSortedConsumers) <= consumersToSelect { return nil } - result := make(map[string]struct{}, queriersToSelect) + result := make(map[string]struct{}, consumersToSelect) rnd := rand.New(rand.NewSource(userSeed)) scratchpad = scratchpad[:0] - scratchpad = append(scratchpad, allSortedQueriers...) + scratchpad = append(scratchpad, allSortedConsumers...) last := len(scratchpad) - 1 - for i := 0; i < queriersToSelect; i++ { + for i := 0; i < consumersToSelect; i++ { r := rnd.Intn(last + 1) result[scratchpad[r]] = struct{}{} // move selected item to the end, it won't be selected anymore. 
diff --git a/pkg/scheduler/queue/tenant_queues_test.go b/pkg/queue/tenant_queues_test.go similarity index 81% rename from pkg/scheduler/queue/tenant_queues_test.go rename to pkg/queue/tenant_queues_test.go index 626fbc57cfe20..95f2a67963aa7 100644 --- a/pkg/scheduler/queue/tenant_queues_test.go +++ b/pkg/queue/tenant_queues_test.go @@ -22,10 +22,10 @@ func TestQueues(t *testing.T) { assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) - uq.addQuerierConnection("querier-1") - uq.addQuerierConnection("querier-2") + uq.addConsumerToConnection("querier-1") + uq.addConsumerToConnection("querier-2") - q, u, lastUserIndex := uq.getNextQueueForQuerier(-1, "querier-1") + q, u, lastUserIndex := uq.getNextQueueForConsumer(-1, "querier-1") assert.Nil(t, q) assert.Equal(t, "", u) @@ -71,7 +71,7 @@ func TestQueues(t *testing.T) { uq.deleteQueue("four") assert.NoError(t, isConsistent(uq)) - q, _, _ = uq.getNextQueueForQuerier(lastUserIndex, "querier-1") + q, _, _ = uq.getNextQueueForConsumer(lastUserIndex, "querier-1") assert.Nil(t, q) } @@ -80,8 +80,8 @@ func TestQueuesOnTerminatingQuerier(t *testing.T) { assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) - uq.addQuerierConnection("querier-1") - uq.addQuerierConnection("querier-2") + uq.addConsumerToConnection("querier-1") + uq.addConsumerToConnection("querier-2") // Add queues: [one, two] qOne := getOrAdd(t, uq, "one", 0) @@ -91,7 +91,7 @@ func TestQueuesOnTerminatingQuerier(t *testing.T) { // After notify shutdown for querier-2, it's expected to own no queue. uq.notifyQuerierShutdown("querier-2") - q, u, _ := uq.getNextQueueForQuerier(-1, "querier-2") + q, u, _ := uq.getNextQueueForConsumer(-1, "querier-2") assert.Nil(t, q) assert.Equal(t, "", u) @@ -99,8 +99,8 @@ func TestQueuesOnTerminatingQuerier(t *testing.T) { confirmOrderForQuerier(t, uq, "querier-1", -1, qOne, qTwo, qOne, qTwo) // After disconnecting querier-2, it's expected to own no queue. - uq.removeQuerier("querier-2") - q, u, _ = uq.getNextQueueForQuerier(-1, "querier-2") + uq.removeConsumer("querier-2") + q, u, _ = uq.getNextQueueForConsumer(-1, "querier-2") assert.Nil(t, q) assert.Equal(t, "", u) } @@ -117,10 +117,10 @@ func TestQueuesWithQueriers(t *testing.T) { // Add some queriers. for ix := 0; ix < queriers; ix++ { qid := fmt.Sprintf("querier-%d", ix) - uq.addQuerierConnection(qid) + uq.addConsumerToConnection(qid) // No querier has any queues yet. - q, u, _ := uq.getNextQueueForQuerier(-1, qid) + q, u, _ := uq.getNextQueueForConsumer(-1, qid) assert.Nil(t, q) assert.Equal(t, "", u) } @@ -133,7 +133,7 @@ func TestQueuesWithQueriers(t *testing.T) { getOrAdd(t, uq, uid, maxQueriersPerUser) // Verify it has maxQueriersPerUser queriers assigned now. 
- qs := uq.mapping.GetByKey(uid).queriers + qs := uq.mapping.GetByKey(uid).consumers assert.Equal(t, maxQueriersPerUser, len(qs)) } @@ -146,7 +146,7 @@ func TestQueuesWithQueriers(t *testing.T) { lastUserIndex := StartIndex for { - _, _, newIx := uq.getNextQueueForQuerier(lastUserIndex, qid) + _, _, newIx := uq.getNextQueueForConsumer(lastUserIndex, qid) if newIx < lastUserIndex { break } @@ -199,18 +199,18 @@ func TestQueuesConsistency(t *testing.T) { assert.NotNil(t, uq.getOrAddQueue(generateTenant(r), generateActor(r), 3)) case 1: qid := generateQuerier(r) - _, _, luid := uq.getNextQueueForQuerier(lastUserIndexes[qid], qid) + _, _, luid := uq.getNextQueueForConsumer(lastUserIndexes[qid], qid) lastUserIndexes[qid] = luid case 2: uq.deleteQueue(generateTenant(r)) case 3: q := generateQuerier(r) - uq.addQuerierConnection(q) + uq.addConsumerToConnection(q) conns[q]++ case 4: q := generateQuerier(r) if conns[q] > 0 { - uq.removeQuerierConnection(q, time.Now()) + uq.removeConsumerConnection(q, time.Now()) conns[q]-- } case 5: @@ -238,8 +238,8 @@ func TestQueues_ForgetDelay(t *testing.T) { // 3 queriers open 2 connections each. for i := 1; i <= 3; i++ { - uq.addQuerierConnection(fmt.Sprintf("querier-%d", i)) - uq.addQuerierConnection(fmt.Sprintf("querier-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) } // Add user queues. @@ -253,12 +253,12 @@ func TestQueues_ForgetDelay(t *testing.T) { require.NotEmpty(t, querier1Users) // Gracefully shutdown querier-1. - uq.removeQuerierConnection("querier-1", now.Add(20*time.Second)) - uq.removeQuerierConnection("querier-1", now.Add(21*time.Second)) + uq.removeConsumerConnection("querier-1", now.Add(20*time.Second)) + uq.removeConsumerConnection("querier-1", now.Add(21*time.Second)) uq.notifyQuerierShutdown("querier-1") // We expect querier-1 has been removed. - assert.NotContains(t, uq.queriers, "querier-1") + assert.NotContains(t, uq.consumers, "querier-1") assert.NoError(t, isConsistent(uq)) // We expect querier-1 users have been shuffled to other queriers. @@ -267,8 +267,8 @@ func TestQueues_ForgetDelay(t *testing.T) { } // Querier-1 reconnects. - uq.addQuerierConnection("querier-1") - uq.addQuerierConnection("querier-1") + uq.addConsumerToConnection("querier-1") + uq.addConsumerToConnection("querier-1") // We expect the initial querier-1 users have got back to querier-1. for _, userID := range querier1Users { @@ -278,11 +278,11 @@ func TestQueues_ForgetDelay(t *testing.T) { } // Querier-1 abruptly terminates (no shutdown notification received). - uq.removeQuerierConnection("querier-1", now.Add(40*time.Second)) - uq.removeQuerierConnection("querier-1", now.Add(41*time.Second)) + uq.removeConsumerConnection("querier-1", now.Add(40*time.Second)) + uq.removeConsumerConnection("querier-1", now.Add(41*time.Second)) // We expect querier-1 has NOT been removed. - assert.Contains(t, uq.queriers, "querier-1") + assert.Contains(t, uq.consumers, "querier-1") assert.NoError(t, isConsistent(uq)) // We expect the querier-1 users have not been shuffled to other queriers. @@ -293,9 +293,9 @@ func TestQueues_ForgetDelay(t *testing.T) { } // Try to forget disconnected queriers, but querier-1 forget delay hasn't passed yet. 
- uq.forgetDisconnectedQueriers(now.Add(90 * time.Second)) + uq.forgetDisconnectedConsumers(now.Add(90 * time.Second)) - assert.Contains(t, uq.queriers, "querier-1") + assert.Contains(t, uq.consumers, "querier-1") assert.NoError(t, isConsistent(uq)) for _, userID := range querier1Users { @@ -305,9 +305,9 @@ func TestQueues_ForgetDelay(t *testing.T) { } // Try to forget disconnected queriers. This time querier-1 forget delay has passed. - uq.forgetDisconnectedQueriers(now.Add(105 * time.Second)) + uq.forgetDisconnectedConsumers(now.Add(105 * time.Second)) - assert.NotContains(t, uq.queriers, "querier-1") + assert.NotContains(t, uq.consumers, "querier-1") assert.NoError(t, isConsistent(uq)) // We expect querier-1 users have been shuffled to other queriers. @@ -330,8 +330,8 @@ func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForget // 3 queriers open 2 connections each. for i := 1; i <= 3; i++ { - uq.addQuerierConnection(fmt.Sprintf("querier-%d", i)) - uq.addQuerierConnection(fmt.Sprintf("querier-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) } // Add user queues. @@ -345,11 +345,11 @@ func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForget require.NotEmpty(t, querier1Users) // Querier-1 abruptly terminates (no shutdown notification received). - uq.removeQuerierConnection("querier-1", now.Add(40*time.Second)) - uq.removeQuerierConnection("querier-1", now.Add(41*time.Second)) + uq.removeConsumerConnection("querier-1", now.Add(40*time.Second)) + uq.removeConsumerConnection("querier-1", now.Add(41*time.Second)) // We expect querier-1 has NOT been removed. - assert.Contains(t, uq.queriers, "querier-1") + assert.Contains(t, uq.consumers, "querier-1") assert.NoError(t, isConsistent(uq)) // We expect the querier-1 users have not been shuffled to other queriers. @@ -360,13 +360,13 @@ func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForget } // Try to forget disconnected queriers, but querier-1 forget delay hasn't passed yet. - uq.forgetDisconnectedQueriers(now.Add(90 * time.Second)) + uq.forgetDisconnectedConsumers(now.Add(90 * time.Second)) // Querier-1 reconnects. - uq.addQuerierConnection("querier-1") - uq.addQuerierConnection("querier-1") + uq.addConsumerToConnection("querier-1") + uq.addConsumerToConnection("querier-1") - assert.Contains(t, uq.queriers, "querier-1") + assert.Contains(t, uq.consumers, "querier-1") assert.NoError(t, isConsistent(uq)) // We expect the querier-1 users have not been shuffled to other queriers. @@ -377,9 +377,9 @@ func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForget } // Try to forget disconnected queriers far in the future, but there's no disconnected querier. 
- uq.forgetDisconnectedQueriers(now.Add(200 * time.Second)) + uq.forgetDisconnectedConsumers(now.Add(200 * time.Second)) - assert.Contains(t, uq.queriers, "querier-1") + assert.Contains(t, uq.consumers, "querier-1") assert.NoError(t, isConsistent(uq)) for _, userID := range querier1Users { @@ -414,7 +414,7 @@ func confirmOrderForQuerier(t *testing.T, uq *tenantQueues, querier string, last t.Helper() var n Queue for _, q := range qs { - n, _, lastUserIndex = uq.getNextQueueForQuerier(lastUserIndex, querier) + n, _, lastUserIndex = uq.getNextQueueForConsumer(lastUserIndex, querier) assert.Equal(t, q, n) assert.NoError(t, isConsistent(uq)) } @@ -422,7 +422,7 @@ func confirmOrderForQuerier(t *testing.T, uq *tenantQueues, querier string, last } func isConsistent(uq *tenantQueues) error { - if len(uq.sortedQueriers) != len(uq.queriers) { + if len(uq.sortedConsumers) != len(uq.consumers) { return fmt.Errorf("inconsistent number of sorted queriers and querier connections") } @@ -441,16 +441,16 @@ func isConsistent(uq *tenantQueues) error { uc++ - if q.maxQueriers == 0 && q.queriers != nil { + if q.maxQueriers == 0 && q.consumers != nil { return fmt.Errorf("user %s has queriers, but maxQueriers=0", u) } - if q.maxQueriers > 0 && len(uq.sortedQueriers) <= q.maxQueriers && q.queriers != nil { + if q.maxQueriers > 0 && len(uq.sortedConsumers) <= q.maxQueriers && q.consumers != nil { return fmt.Errorf("user %s has queriers set despite not enough queriers available", u) } - if q.maxQueriers > 0 && len(uq.sortedQueriers) > q.maxQueriers && len(q.queriers) != q.maxQueriers { - return fmt.Errorf("user %s has incorrect number of queriers, expected=%d, got=%d", u, len(q.queriers), q.maxQueriers) + if q.maxQueriers > 0 && len(uq.sortedConsumers) > q.maxQueriers && len(q.consumers) != q.maxQueriers { + return fmt.Errorf("user %s has incorrect number of queriers, expected=%d, got=%d", u, len(q.consumers), q.maxQueriers) } } @@ -466,12 +466,12 @@ func getUsersByQuerier(queues *tenantQueues, querierID string) []string { var userIDs []string for _, userID := range queues.mapping.Keys() { q := queues.mapping.GetByKey(userID) - if q.queriers == nil { + if q.consumers == nil { // If it's nil then all queriers can handle this user. userIDs = append(userIDs, userID) continue } - if _, ok := q.queriers[querierID]; ok { + if _, ok := q.consumers[querierID]; ok { userIDs = append(userIDs, userID) } } @@ -481,14 +481,14 @@ func getUsersByQuerier(queues *tenantQueues, querierID string) []string { func TestShuffleQueriers(t *testing.T) { allQueriers := []string{"a", "b", "c", "d", "e"} - require.Nil(t, shuffleQueriersForTenants(12345, 10, allQueriers, nil)) - require.Nil(t, shuffleQueriersForTenants(12345, len(allQueriers), allQueriers, nil)) + require.Nil(t, shuffleConsumersForTenants(12345, 10, allQueriers, nil)) + require.Nil(t, shuffleConsumersForTenants(12345, len(allQueriers), allQueriers, nil)) - r1 := shuffleQueriersForTenants(12345, 3, allQueriers, nil) + r1 := shuffleConsumersForTenants(12345, 3, allQueriers, nil) require.Equal(t, 3, len(r1)) // Same input produces same output. 
- r2 := shuffleQueriersForTenants(12345, 3, allQueriers, nil) + r2 := shuffleConsumersForTenants(12345, 3, allQueriers, nil) require.Equal(t, 3, len(r2)) require.Equal(t, r1, r2) } @@ -510,7 +510,7 @@ func TestShuffleQueriersCorrectness(t *testing.T) { toSelect = 3 } - selected := shuffleQueriersForTenants(r.Int63(), toSelect, allSortedQueriers, nil) + selected := shuffleConsumersForTenants(r.Int63(), toSelect, allSortedQueriers, nil) require.Equal(t, toSelect, len(selected)) diff --git a/pkg/scheduler/queue/treequeue.go b/pkg/queue/treequeue.go similarity index 100% rename from pkg/scheduler/queue/treequeue.go rename to pkg/queue/treequeue.go diff --git a/pkg/scheduler/queue/treequeue_test.go b/pkg/queue/treequeue_test.go similarity index 100% rename from pkg/scheduler/queue/treequeue_test.go rename to pkg/queue/treequeue_test.go diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index ef2a1b5bbeea9..8dfbc5eb777c0 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc" "github.com/grafana/loki/pkg/lokifrontend/frontend/v2/frontendv2pb" - "github.com/grafana/loki/pkg/scheduler/queue" + "github.com/grafana/loki/pkg/queue" "github.com/grafana/loki/pkg/scheduler/schedulerpb" "github.com/grafana/loki/pkg/util" lokigrpc "github.com/grafana/loki/pkg/util/httpgrpc" @@ -144,7 +144,7 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, ringManager *lokiri s.connectedQuerierClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ Name: "cortex_query_scheduler_connected_querier_clients", Help: "Number of querier worker clients currently connected to the query-scheduler.", - }, s.requestQueue.GetConnectedQuerierWorkersMetric) + }, s.requestQueue.GetConnectedConsumersMetric) s.connectedFrontendClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ Name: "cortex_query_scheduler_connected_frontend_clients", Help: "Number of query-frontend worker clients currently connected to the query-scheduler.", @@ -404,8 +404,8 @@ func (s *Scheduler) QuerierLoop(querier schedulerpb.SchedulerForQuerier_QuerierL querierID := resp.GetQuerierID() level.Debug(s.log).Log("msg", "querier connected", "querier", querierID) - s.requestQueue.RegisterQuerierConnection(querierID) - defer s.requestQueue.UnregisterQuerierConnection(querierID) + s.requestQueue.RegisterConsumerConnection(querierID) + defer s.requestQueue.UnregisterConsumerConnection(querierID) lastIndex := queue.StartIndex @@ -463,7 +463,7 @@ func (s *Scheduler) QuerierLoop(querier schedulerpb.SchedulerForQuerier_QuerierL func (s *Scheduler) NotifyQuerierShutdown(_ context.Context, req *schedulerpb.NotifyQuerierShutdownRequest) (*schedulerpb.NotifyQuerierShutdownResponse, error) { level.Debug(s.log).Log("msg", "received shutdown notification from querier", "querier", req.GetQuerierID()) - s.requestQueue.NotifyQuerierShutdown(req.GetQuerierID()) + s.requestQueue.NotifyConsumerShutdown(req.GetQuerierID()) return &schedulerpb.NotifyQuerierShutdownResponse{}, nil } diff --git a/pkg/util/active_user.go b/pkg/util/active_user.go index 3e495b9b84c04..08b327df9336c 100644 --- a/pkg/util/active_user.go +++ b/pkg/util/active_user.go @@ -128,3 +128,13 @@ func (s *ActiveUsersCleanupService) iteration(_ context.Context) error { } return nil } + +func (s *ActiveUsersCleanupService) ActiveUsers() []string { + s.activeUsers.mu.RLock() + defer s.activeUsers.mu.RUnlock() + users := make([]string, 0, 
len(s.activeUsers.timestamps)) + for u := range s.activeUsers.timestamps { + users = append(users, u) + } + return users +} From cb399523091c980141ea19d145c69ac60946644d Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Fri, 20 Oct 2023 16:04:13 +0200 Subject: [PATCH 07/33] Deprecated/Deleted Config Checker tool (#10977) **What this PR does / why we need it**: This PR adds a new _deprecated/deleted config checker_ tool to allow users upgrading from previous Loki versions to check their Loki config and runtime config for deprecated and deleted options, as well as providing them with alternatives and reasons why those configs were deprecated/deleted. Mimir provides a [tool][1] that allows users to migrate their old configs to new ones. Having that is our future goal but the new tool in this PR is a good middle ground for the short-mid term. We also update the checklist in the PR template to ask contributors to update the tool source of truth if the contributor is deprecating or deleting a config. Here's an example for how to use the tool: ```bash go run tools/deprecated-config-checker/main.go \ -config.file tools/deprecated-config-checker/test-fixtures/config.yaml \ -runtime-config.file tools/deprecated-config-checker/test-fixtures/runtime-config.yaml ``` And this is an example output: ![image](https://github.com/grafana/loki/assets/8354290/a86c0815-9875-4086-a267-9bbe06a6ee0e) ### Notes I think I've gone through all the deprecated and deleted configs. I cannot find the config pointed by the deprecated `ruler.storage`. It states ``` ruler: ... # Deprecated: Use -ruler-storage. CLI flags and their respective YAML config # options instead. storage: ... ``` I couldn't find that `-ruler-storage` cli flag nor the YAML definition. [1]: https://grafana.com/docs/mimir/latest/manage/tools/mimirtool/#convert --- .github/pull_request_template.md | 1 + tools/deprecated-config-checker/README.md | 21 ++ .../checker/checker.go | 307 ++++++++++++++++++ .../checker/checker_test.go | 168 ++++++++++ .../deleted-config.yaml | 33 ++ .../deprecated-config.yaml | 68 ++++ tools/deprecated-config-checker/main.go | 75 +++++ .../test-fixtures/config.yaml | 130 ++++++++ .../test-fixtures/runtime-config.yaml | 27 ++ 9 files changed, 830 insertions(+) create mode 100644 tools/deprecated-config-checker/README.md create mode 100644 tools/deprecated-config-checker/checker/checker.go create mode 100644 tools/deprecated-config-checker/checker/checker_test.go create mode 100644 tools/deprecated-config-checker/deleted-config.yaml create mode 100644 tools/deprecated-config-checker/deprecated-config.yaml create mode 100644 tools/deprecated-config-checker/main.go create mode 100644 tools/deprecated-config-checker/test-fixtures/config.yaml create mode 100644 tools/deprecated-config-checker/test-fixtures/runtime-config.yaml diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c0f7f53682633..3db779ecd7dcc 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -13,3 +13,4 @@ Fixes # - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. 
[Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) +- [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. \ No newline at end of file diff --git a/tools/deprecated-config-checker/README.md b/tools/deprecated-config-checker/README.md new file mode 100644 index 0000000000000..6b9f89918e2ce --- /dev/null +++ b/tools/deprecated-config-checker/README.md @@ -0,0 +1,21 @@ +# Deprecated Config Checker + +This script can check your configuration files for deprecated and deleted options. + +## Usage + +Run the script with `-help` for a list of options. + +### Example + +```bash +go run tools/deprecated-config-checker/main.go \ + -config.file tools/deprecated-config-checker/test-fixtures/config.yaml \ + -runtime-config.file tools/deprecated-config-checker/test-fixtures/runtime-config.yaml +``` + +## Adding a new config deprecation or deletion? + +Add deprecations to the `deprecated-config.yaml` file, and deletions to the `deleted-config.yaml`. +Then, update the `test-fixtures/config.yaml` and `test-fixtures/runtime-config.yaml` files as well as +the tests in `checker/checker_test.go`. diff --git a/tools/deprecated-config-checker/checker/checker.go b/tools/deprecated-config-checker/checker/checker.go new file mode 100644 index 0000000000000..5651ab49bbe82 --- /dev/null +++ b/tools/deprecated-config-checker/checker/checker.go @@ -0,0 +1,307 @@ +package checker + +import ( + "flag" + "fmt" + "os" + "strings" + + "gopkg.in/yaml.v3" +) + +const ( + deprecatedValuesField = "_deprecated" + messageField = "_msg" + + defaultDeprecatesFilePath = "tools/deprecated-config-checker/deprecated-config.yaml" + defaultDeletesFilePath = "tools/deprecated-config-checker/deleted-config.yaml" + + configRequiredErrorMsg = "config.file or runtime-config.file are required" +) + +type Config struct { + DeprecatesFile string + DeletesFile string + ConfigFile string + RuntimeConfigFile string +} + +func (c *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&c.DeprecatesFile, "deprecates-file", defaultDeprecatesFilePath, "YAML file with deprecated configs") + f.StringVar(&c.DeletesFile, "deletes-file", defaultDeletesFilePath, "YAML file with deleted configs") + f.StringVar(&c.ConfigFile, "config.file", "", "User-defined config file to validate") + f.StringVar(&c.RuntimeConfigFile, "runtime-config.file", "", "User-defined runtime config file to validate") +} + +func (c *Config) Validate() error { + if c.ConfigFile == "" && c.RuntimeConfigFile == "" { + return fmt.Errorf(configRequiredErrorMsg) + } + return nil +} + +type RawYaml map[string]interface{} + +type Checker struct { + config RawYaml + runtimeConfig RawYaml + + deprecates RawYaml + deletes RawYaml +} + +func loadYAMLFile(path string) (RawYaml, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var out RawYaml + if err := yaml.Unmarshal(data, &out); err != nil { + return nil, err + } + + return out, nil +} + +func NewChecker(cfg Config) (*Checker, error) { + deprecates, err := loadYAMLFile(cfg.DeprecatesFile) + if err != nil { + return nil, fmt.Errorf("failed to read deprecates YAML: %w", err) + } + + deletes, err := loadYAMLFile(cfg.DeletesFile) + if err != nil { + return nil, fmt.Errorf("failed to read deletes YAML: %w", err) + } + + var config RawYaml + if cfg.ConfigFile != "" { + config, err = loadYAMLFile(cfg.ConfigFile) + if err 
!= nil { + return nil, fmt.Errorf("failed to read config YAML: %w", err) + } + } + + var runtimeConfig RawYaml + if cfg.RuntimeConfigFile != "" { + runtimeConfig, err = loadYAMLFile(cfg.RuntimeConfigFile) + if err != nil { + return nil, fmt.Errorf("failed to read runtime config YAML: %w", err) + } + } + + return &Checker{ + config: config, + runtimeConfig: runtimeConfig, + deprecates: deprecates, + deletes: deletes, + }, nil +} + +func (c *Checker) CheckConfigDeprecated() []DeprecationNotes { + return checkConfigDeprecated(c.deprecates, c.config) +} + +func (c *Checker) CheckConfigDeleted() []DeprecationNotes { + return checkConfigDeprecated(c.deletes, c.config) +} + +func (c *Checker) CheckRuntimeConfigDeprecated() []DeprecationNotes { + return checkRuntimeConfigDeprecated(c.deprecates, c.runtimeConfig) +} + +func (c *Checker) CheckRuntimeConfigDeleted() []DeprecationNotes { + return checkRuntimeConfigDeprecated(c.deletes, c.runtimeConfig) +} + +type deprecationAnnotation struct { + DeprecatedValues []string + Msg string +} + +func getDeprecationAnnotation(value interface{}) (deprecationAnnotation, bool) { + // If the value is a string, return it as the message + if msg, is := value.(string); is { + return deprecationAnnotation{ + Msg: msg, + }, true + } + + // If the value is a map, check if it has a _msg field + if inner, is := value.(RawYaml); is { + msg, exists := inner[messageField] + if !exists { + return deprecationAnnotation{}, false + } + + var deprecatedValues []string + if v, exists := inner[deprecatedValuesField]; exists { + asIfcSlice := v.([]interface{}) + deprecatedValues = make([]string, len(asIfcSlice)) + for i, v := range asIfcSlice { + deprecatedValues[i] = v.(string) + } + } + + return deprecationAnnotation{ + Msg: msg.(string), + DeprecatedValues: deprecatedValues, + }, true + } + + return deprecationAnnotation{}, false +} + +type DeprecationNotes struct { + deprecationAnnotation + ItemPath string + ItemValues []string +} + +func (d DeprecationNotes) String() string { + var sb strings.Builder + + sb.WriteString(d.ItemPath) + if len(d.ItemValues) > 0 { + sb.WriteString(" = ") + if len(d.ItemValues) == 1 { + sb.WriteString(d.ItemValues[0]) + } else { + sb.WriteString("[") + sb.WriteString(strings.Join(d.ItemValues, ", ")) + sb.WriteString("]") + } + } + sb.WriteString(": " + d.Msg) + if len(d.DeprecatedValues) > 0 { + sb.WriteString("\n\t|- " + "Deprecated values: ") + sb.WriteString(strings.Join(d.DeprecatedValues, ", ")) + } + + return sb.String() +} + +func appenToPath(path, key string) string { + if path == "" { + return key + } + return path + "." 
+ key +} + +func getLimitsConfig(in RawYaml) RawYaml { + limits, exists := in["limits_config"] + if !exists { + return nil + } + return limits.(RawYaml) +} + +func getOverrides(runtimeConf RawYaml) RawYaml { + overrides, exists := runtimeConf["overrides"] + if !exists { + return nil + } + return overrides.(RawYaml) +} + +func checkRuntimeConfigDeprecated(deprecates, runtimeConfig RawYaml) []DeprecationNotes { + deprecatedLimits := getLimitsConfig(deprecates) + if deprecatedLimits == nil { + return nil + } + + overrides := getOverrides(runtimeConfig) + if overrides == nil { + return nil + } + + // We check the deprecated fields for each tenant + var deprecations []DeprecationNotes + for tenant, tenantOverrides := range overrides { + tenantDeprecations := checkConfigDeprecated(deprecatedLimits, tenantOverrides.(RawYaml)) + for i := range tenantDeprecations { + tenantPath := appenToPath("overrides", tenant) + tenantDeprecations[i].ItemPath = appenToPath(tenantPath, tenantDeprecations[i].ItemPath) + } + deprecations = append(deprecations, tenantDeprecations...) + } + + return deprecations +} + +func checkConfigDeprecated(deprecates, config RawYaml) []DeprecationNotes { + return enumerateDeprecatesFields(deprecates, config, "", []DeprecationNotes{}) +} + +func enumerateDeprecatesFields(deprecates, input RawYaml, rootPath string, deprecations []DeprecationNotes) []DeprecationNotes { + for key, deprecate := range deprecates { + inputValue, exists := input[key] + if !exists { + // If this item is not set in the config, we can skip it. + continue + } + + path := appenToPath(rootPath, key) + + note, isDeprecatedNote := getDeprecationAnnotation(deprecate) + if isDeprecatedNote { + var inputValueStrSlice []string + switch v := inputValue.(type) { + case []interface{}: + for _, val := range v { + inputValueStrSlice = append(inputValueStrSlice, val.(string)) + } + case string: + inputValueStrSlice = []string{v} + case int, int32, int64, uint, uint32, uint64, float32, float64: + inputValueStrSlice = []string{fmt.Sprintf("%d", v)} + case bool: + inputValueStrSlice = []string{fmt.Sprintf("%t", v)} + } + + // If there are no specific values deprecated, the whole config is deprecated. + // Otherwise, look for the config value in the list of deprecated values. + var inputDeprecated bool + if len(note.DeprecatedValues) == 0 { + inputDeprecated = true + } else { + // If the config is a list, check each item. + FindDeprecatedValues: + for _, v := range note.DeprecatedValues { + for _, itemValueStr := range inputValueStrSlice { + if v == itemValueStr { + inputDeprecated = true + break FindDeprecatedValues + } + } + } + } + + if inputDeprecated { + deprecations = append(deprecations, DeprecationNotes{ + deprecationAnnotation: note, + ItemPath: path, + ItemValues: inputValueStrSlice, + }) + continue + } + } + + // To this point, the deprecate item is not a leaf, so we need to recurse into it. + if deprecateYaml, is := deprecate.(RawYaml); is { + switch v := inputValue.(type) { + case RawYaml: + deprecations = enumerateDeprecatesFields(deprecateYaml, v, path, deprecations) + case []interface{}: + // If the config is a list, recurse into each item. 
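+			// For example, schema_config.configs is a list: each entry is checked
+			// individually and its index is recorded in the reported path, yielding
+			// items such as "schema_config.configs.[1].store".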
+ for i, item := range v { + itemYaml := item.(RawYaml) + deprecations = enumerateDeprecatesFields(deprecateYaml, itemYaml, appenToPath(path, fmt.Sprintf("[%d]", i)), deprecations) + } + } + } + } + + return deprecations +} diff --git a/tools/deprecated-config-checker/checker/checker_test.go b/tools/deprecated-config-checker/checker/checker_test.go new file mode 100644 index 0000000000000..929166ed4aa7d --- /dev/null +++ b/tools/deprecated-config-checker/checker/checker_test.go @@ -0,0 +1,168 @@ +package checker + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + deprecatesFilePath = "../deprecated-config.yaml" + deletesFilePath = "../deleted-config.yaml" + configPath = "../test-fixtures/config.yaml" + runtimeConfigPath = "../test-fixtures/runtime-config.yaml" +) + +var ( + expectedConfigDeletes = []string{ + "ingester.max_transfer_retries", + "querier.engine.timeout", + "query_range.split_queries_by_interval", + "query_range.forward_headers_list", + "frontend_worker.parallelism", + "frontend_worker.match_max_concurrent", + "common.storage.s3.sse_encryption", + "ruler.storage.s3.sse_encryption", + "storage_config.boltdb_shipper.use_boltdb_shipper_as_backup", + "storage_config.aws.sse_encryption", + "storage_config.s3.sse_encryption", + } + + expectedConfigDeprecates = []string{ + "ruler.remote_write.client", + "compactor.deletion_mode", + "index_gateway.ring.replication_factor", + "storage_config.bigtable", + "storage_config.cassandra", + "storage_config.boltdb", + "storage_config.grpc_store", + "storage_config.aws.dynamodb", + "chunk_store_config.write_dedupe_cache_config", + "chunk_store_config.max_look_back_period", + "limits_config.unordered_writes", + "limits_config.ruler_evaluation_delay_duration", + "limits_config.ruler_remote_write_url", + "limits_config.ruler_remote_write_timeout", + "limits_config.ruler_remote_write_headers", + "limits_config.ruler_remote_write_relabel_configs", + "limits_config.ruler_remote_write_queue_capacity", + "limits_config.ruler_remote_write_queue_min_shards", + "limits_config.ruler_remote_write_queue_max_shards", + "limits_config.ruler_remote_write_queue_max_samples_per_send", + "limits_config.ruler_remote_write_queue_batch_send_deadline", + "limits_config.ruler_remote_write_queue_min_backoff", + "limits_config.ruler_remote_write_queue_max_backoff", + "limits_config.ruler_remote_write_queue_retry_on_ratelimit", + "limits_config.ruler_remote_write_sigv4_config", + "limits_config.per_tenant_override_config", + "limits_config.per_tenant_override_period", + "limits_config.allow_deletes", + "schema_config.configs.[1].store", + "schema_config.configs.[1].object_store", + "schema_config.configs.[2].store", + "schema_config.configs.[2].object_store", + "schema_config.configs.[3].store", + "schema_config.configs.[3].object_store", + "schema_config.configs.[4].store", + "schema_config.configs.[4].object_store", + "schema_config.configs.[5].store", + "schema_config.configs.[5].object_store", + "schema_config.configs.[6].store", + "schema_config.configs.[6].object_store", + "schema_config.configs.[7].store", + "schema_config.configs.[7].object_store", + "schema_config.configs.[8].store", + "schema_config.configs.[8].object_store", + } + + expectedRuntimeConfigDeletes = []string{} + + expectedRuntimeConfigDeprecates = []string{ + "overrides.foo.unordered_writes", + "overrides.foo.ruler_evaluation_delay_duration", + "overrides.foo.ruler_remote_write_url", + "overrides.foo.ruler_remote_write_timeout", + 
"overrides.foo.ruler_remote_write_headers", + "overrides.foo.ruler_remote_write_relabel_configs", + "overrides.foo.ruler_remote_write_queue_capacity", + "overrides.foo.ruler_remote_write_queue_min_shards", + "overrides.foo.ruler_remote_write_queue_max_shards", + "overrides.foo.ruler_remote_write_queue_max_samples_per_send", + "overrides.foo.ruler_remote_write_queue_batch_send_deadline", + "overrides.foo.ruler_remote_write_queue_min_backoff", + "overrides.foo.ruler_remote_write_queue_max_backoff", + "overrides.foo.ruler_remote_write_queue_retry_on_ratelimit", + "overrides.foo.ruler_remote_write_sigv4_config", + "overrides.foo.per_tenant_override_config", + "overrides.foo.per_tenant_override_period", + "overrides.foo.allow_deletes", + "overrides.bar.unordered_writes", + "overrides.bar.ruler_evaluation_delay_duration", + "overrides.bar.ruler_remote_write_url", + "overrides.bar.ruler_remote_write_timeout", + "overrides.bar.ruler_remote_write_headers", + "overrides.bar.ruler_remote_write_relabel_configs", + "overrides.bar.ruler_remote_write_queue_capacity", + "overrides.bar.ruler_remote_write_queue_min_shards", + "overrides.bar.ruler_remote_write_queue_max_shards", + "overrides.bar.ruler_remote_write_queue_max_samples_per_send", + "overrides.bar.ruler_remote_write_queue_batch_send_deadline", + "overrides.bar.ruler_remote_write_queue_min_backoff", + "overrides.bar.ruler_remote_write_queue_max_backoff", + "overrides.bar.ruler_remote_write_queue_retry_on_ratelimit", + "overrides.bar.ruler_remote_write_sigv4_config", + "overrides.bar.per_tenant_override_config", + "overrides.bar.per_tenant_override_period", + "overrides.bar.allow_deletes", + } +) + +func TestConfigDeprecatesAndDeletes(t *testing.T) { + cfg := Config{ + DeprecatesFile: deprecatesFilePath, + DeletesFile: deletesFilePath, + ConfigFile: configPath, + } + + c, err := NewChecker(cfg) + require.NoError(t, err) + + deprecates := c.CheckConfigDeprecated() + deprecatesPaths := make([]string, 0, len(deprecates)) + for _, d := range deprecates { + deprecatesPaths = append(deprecatesPaths, d.ItemPath) + } + require.ElementsMatch(t, expectedConfigDeprecates, deprecatesPaths) + + deletes := c.CheckConfigDeleted() + deletesPaths := make([]string, 0, len(deletes)) + for _, d := range deletes { + deletesPaths = append(deletesPaths, d.ItemPath) + } + require.ElementsMatch(t, expectedConfigDeletes, deletesPaths) +} + +func TestRuntimeConfigDeprecatesAndDeletes(t *testing.T) { + cfg := Config{ + DeprecatesFile: deprecatesFilePath, + DeletesFile: deletesFilePath, + RuntimeConfigFile: runtimeConfigPath, + } + + c, err := NewChecker(cfg) + require.NoError(t, err) + + deprecates := c.CheckRuntimeConfigDeprecated() + deprecatesPaths := make([]string, 0, len(deprecates)) + for _, d := range deprecates { + deprecatesPaths = append(deprecatesPaths, d.ItemPath) + } + require.ElementsMatch(t, expectedRuntimeConfigDeprecates, deprecatesPaths) + + deletes := c.CheckRuntimeConfigDeleted() + deletesPaths := make([]string, 0, len(deletes)) + for _, d := range deletes { + deletesPaths = append(deletesPaths, d.ItemPath) + } + require.ElementsMatch(t, expectedRuntimeConfigDeletes, deletesPaths) +} diff --git a/tools/deprecated-config-checker/deleted-config.yaml b/tools/deprecated-config-checker/deleted-config.yaml new file mode 100644 index 0000000000000..9fa53d61cfbd4 --- /dev/null +++ b/tools/deprecated-config-checker/deleted-config.yaml @@ -0,0 +1,33 @@ +# This file should contain a list fo deleted config options. 
+#
+# The syntax for this file is the same as the syntax for deprecated-config.yaml.
+
+ingester:
+  max_transfer_retries: "Enable the ingester WAL and rely on new ingesters to replay the WAL."
+
+querier:
+  engine:
+    timeout: "Use global or per-tenant query_timeout configuration from limits_config."
+
+query_range:
+  split_queries_by_interval: "Configure global or per-tenant split_queries_by_interval limit"
+  forward_headers_list: "This setting is removed without an alternative"
+
+frontend_worker:
+  parallelism: "Configure querier.max_concurrent to limit the max concurrent requests processed by the queriers."
+  match_max_concurrent: "Configure querier.max_concurrent to limit the max concurrent requests processed by the queriers."
+
+common:
+  storage:
+    s3: &s3_deletes
+      sse_encryption: "Encryption for all S3 buckets is now SSE-S3. Configure .sse field instead."
+
+ruler:
+  storage:
+    s3: *s3_deletes
+
+storage_config:
+  boltdb_shipper:
+    use_boltdb_shipper_as_backup: "Since TSDB is now stable and the recommended index type, the setting has become irrelevant and therefore was removed. The previous default value false is applied."
+  aws: *s3_deletes
+  s3: *s3_deletes
diff --git a/tools/deprecated-config-checker/deprecated-config.yaml b/tools/deprecated-config-checker/deprecated-config.yaml
new file mode 100644
index 0000000000000..873ef9ec76c85
--- /dev/null
+++ b/tools/deprecated-config-checker/deprecated-config.yaml
@@ -0,0 +1,68 @@
+# This file should contain a list of deprecated config options.
+#
+# -- Syntax --
+# The value for a deprecated option should be a message that explains why the option is deprecated and what to use instead. E.g.
+# storage_config:
+#   cassandra: "Cassandra is deprecated. Use any other non-deprecated storage instead."
+#
+# A given set of deprecated values can also be given through the _deprecated field along with a message in the _msg field. E.g.
+# schema_config:
+#   configs:
+#     store:
+#       _deprecated: ["aws", "aws-dynamo", "gcp", "gcp-columnkey", "bigtable", "bigtable-hashed", "cassandra", "grpc"]
+#       _msg: "Use tsdb (preferred) or boltdb-shipper instead."
+#
+# Note that even though the configs in schema_config takes a list, here we specify the deprecated fields for each item in the list.
+
+ruler:
+  remote_write:
+    client: "Use clients instead."
+
+compactor:
+  deletion_mode: "Use global or per-tenant deletion_mode configuration from limits_config."
+
+index_gateway:
+  ring:
+    replication_factor: "Use global or per-tenant index_gateway_shard_size configuration from limits_config."
+
+schema_config:
+  configs:
+    store:
+      _deprecated: ["aws", "aws-dynamo", "gcp", "gcp-columnkey", "bigtable", "bigtable-hashed", "cassandra", "grpc"]
+      _msg: "Use tsdb (preferred) or boltdb-shipper instead."
+    object_store:
+      _deprecated: ["aws-dynamo", "gcp", "gcp-columnkey", "bigtable", "bigtable-hashed", "cassandra", "grpc"]
+      _msg: "Use any other non-deprecated storage instead."
+
+storage_config:
+  bigtable: "Bigtable is deprecated. Use any other non-deprecated storage instead."
+  cassandra: "Cassandra is deprecated. Use any other non-deprecated storage instead."
+  boltdb: "Boltdb is deprecated. Consider using boltdb_shipper or any other non-deprecated storage instead."
+  grpc_store: "gRPC store is deprecated. Use any other non-deprecated storage instead."
+  aws:
+    dynamodb: "DynamoDB is deprecated and will be eventually removed"
+
+chunk_store_config:
+  write_dedupe_cache_config: "Write dedupe cache is deprecated along with deprecated index types.
Consider using TSDB index which does not require a write dedupe cache." + max_look_back_period: "Use global or per-tenant max_query_lookback configuration from limits_config." + +## NOTE: This will also be used to validate per-tenant overrides. +limits_config: + unordered_writes: "Will be eventually removed." + ruler_evaluation_delay_duration: "Will be eventually removed." + ruler_remote_write_url: "Use ruler_remote_write_config instead." + ruler_remote_write_timeout: "Use ruler_remote_write_config instead." + ruler_remote_write_headers: "Use ruler_remote_write_config instead." + ruler_remote_write_relabel_configs: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_capacity: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_min_shards: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_max_shards: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_max_samples_per_send: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_batch_send_deadline: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_min_backoff: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_max_backoff: "Use ruler_remote_write_config instead." + ruler_remote_write_queue_retry_on_ratelimit: "Use ruler_remote_write_config instead." + ruler_remote_write_sigv4_config: "Use ruler_remote_write_config instead." + per_tenant_override_config: "Feature renamed to 'runtime configuration', flag deprecated in favor of runtime_config.file" + per_tenant_override_period: "Feature renamed to 'runtime configuration', flag deprecated in favor of runtime_config.period" + allow_deletes: "Use deletion_mode per tenant configuration instead." diff --git a/tools/deprecated-config-checker/main.go b/tools/deprecated-config-checker/main.go new file mode 100644 index 0000000000000..fc2f5cfda4f25 --- /dev/null +++ b/tools/deprecated-config-checker/main.go @@ -0,0 +1,75 @@ +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/fatih/color" + "github.com/grafana/loki/tools/deprecated-config-checker/checker" +) + +const upgradeGuideURL = "https://grafana.com/docs/loki/latest/setup/upgrade/" + +func RegisterFlags(f *flag.FlagSet) { + f.BoolVar(&color.NoColor, "no-color", false, "Disable color output") +} + +func main() { + var cfg checker.Config + + fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + RegisterFlags(fs) + cfg.RegisterFlags(fs) + if err := fs.Parse(os.Args[1:]); err != nil { + panic(err) + } + + if err := cfg.Validate(); err != nil { + panic(err) + } + + c, err := checker.NewChecker(cfg) + if err != nil { + panic(err) + } + + deprecates := c.CheckConfigDeprecated() + if len(deprecates) > 0 { + fmt.Print(color.YellowString("-- Deprecated Configs --\n\n")) + for _, d := range deprecates { + fmt.Printf("%s %s\n\n", color.YellowString("[*]"), d) + } + } + + deletes := c.CheckConfigDeleted() + if len(deletes) > 0 { + fmt.Print(color.RedString("-- Deleted Configs --\n\n")) + for _, d := range deletes { + fmt.Printf("%s %s\n\n", color.RedString("[-]"), d) + } + } + + deprecatesRuntime := c.CheckRuntimeConfigDeprecated() + if len(deprecatesRuntime) > 0 { + fmt.Print(color.YellowString("-- Deprecated Runtime Configs --\n\n")) + for _, d := range deprecatesRuntime { + fmt.Printf("%s %s\n\n", color.YellowString("[*]"), d) + } + } + + deletesRuntime := c.CheckRuntimeConfigDeleted() + if len(deletesRuntime) > 0 { + fmt.Print(color.RedString("-- Deleted Runtime Configs --\n\n")) + for _, d := range deletesRuntime { 
+ fmt.Printf("%s %s\n\n", color.RedString("[-]"), d) + } + } + + if len(deprecates) > 0 || len(deletes) > 0 { + fmt.Printf("Please refer to the upgrade guide for more details: %s\n", upgradeGuideURL) + os.Exit(1) + } + + fmt.Printf("No deprecated or deleted configs found.\n") +} diff --git a/tools/deprecated-config-checker/test-fixtures/config.yaml b/tools/deprecated-config-checker/test-fixtures/config.yaml new file mode 100644 index 0000000000000..eaa713ff23e25 --- /dev/null +++ b/tools/deprecated-config-checker/test-fixtures/config.yaml @@ -0,0 +1,130 @@ +auth_enabled: false + +server: + http_listen_port: 3100 + +common: + path_prefix: /tmp/loki + storage: + filesystem: + chunks_directory: /tmp/loki/chunks + rules_directory: /tmp/loki/rules + s3: &s3_cfg + s3: "bucket.123abc.net" + sse_encryption: true # DELETED + +ingester: + chunk_target_size: 1000 + max_chunk_age: 1h30m + max_transfer_retries: 7 # DELETED + +querier: + query_ingesters_within: 1h + engine: + timeout: 1m # DELETED + max_look_back_period: 1m + +query_range: + align_queries_with_step: true + split_queries_by_interval: 15m # DELETED + forward_headers_list: ["foo", "bar"] # DELETED + +frontend_worker: + frontend_address: "123abc.net" + parallelism: 1 # DELETED + match_max_concurrent: false # DELETED + +index_gateway: + ring: + replication_factor: 2 # DEPRECATED + instance_interface_names: ["eth0", "eth1"] + +compactor: + working_directory: /tmp/loki/boltdb-shipper-active + deletion_mode: "delete" # DEPRECATED + +chunk_store_config: + cache_lookups_older_than: 1h + write_dedupe_cache_config: # DEPRECATED + default_validity: 30m + max_look_back_period: 1m # DEPRECATED + +ruler: + flush_period: 1s + storage: + s3: *s3_cfg + remote_write: + enabled: true + client: # DEPRECATED + url: "http://localhost:3100/api/prom/push" + +storage_config: + bigtable: # DEPRECATED + project: "my-project" + cassandra: # DEPRECATED + addresses: 'a.b.c.d:9042' + boltdb: # DEPRECATED + directory: /tmp/loki/boltdb + grpc_store: # DEPRECATED + server_address: "grpc.123abc.net" + boltdb_shipper: + active_index_directory: /tmp/loki/boltdb-shipper-active + use_boltdb_shipper_as_backup: true # DELETED + aws: + s3: "bucket.123abc.net" + sse_encryption: true # DELETED + dynamodb: # DEPRECATED + dynamodb_url: "dynamodb.123abc.net" + s3: *s3_cfg + +schema_config: + configs: + - from: 2020-10-10 + store: tsdb + object_store: aws + - from: 2020-10-11 + store: aws # DEPRECATED + object_store: aws-dynamo # DEPRECATED + - from: 2020-10-12 + store: aws-dynamo # DEPRECATED + object_store: aws-dynamo # DEPRECATED + - from: 2020-10-13 + store: gcp # DEPRECATED + object_store: gcp # DEPRECATED + - from: 2020-10-14 + store: gcp-columnkey # DEPRECATED + object_store: gcp-columnkey # DEPRECATED + - from: 2020-10-15 + store: bigtable # DEPRECATED + object_store: bigtable # DEPRECATED + - from: 2020-10-16 + store: bigtable-hashed # DEPRECATED + object_store: bigtable-hashed # DEPRECATED + - from: 2020-10-17 + store: cassandra # DEPRECATED + object_store: cassandra # DEPRECATED + - from: 2020-10-18 + store: grpc # DEPRECATED + object_store: grpc # DEPRECATED + +limits_config: + ingestion_rate_mb: 100 + unordered_writes: true # DEPRECATED + ruler_evaluation_delay_duration: 1m # DEPRECATED + ruler_remote_write_url: "push.123abc.net" # DEPRECATED + ruler_remote_write_timeout: 1m # DEPRECATED + ruler_remote_write_headers: ["foo", "bar"] # DEPRECATED + ruler_remote_write_relabel_configs: "foo" # DEPRECATED + ruler_remote_write_queue_capacity: 10 # DEPRECATED + 
ruler_remote_write_queue_min_shards: 10 # DEPRECATED + ruler_remote_write_queue_max_shards: 100 # DEPRECATED + ruler_remote_write_queue_max_samples_per_send: 50 # DEPRECATED + ruler_remote_write_queue_batch_send_deadline: 10m # DEPRECATED + ruler_remote_write_queue_min_backoff: 1m # DEPRECATED + ruler_remote_write_queue_max_backoff: 5m # DEPRECATED + ruler_remote_write_queue_retry_on_ratelimit: true # DEPRECATED + ruler_remote_write_sigv4_config: # DEPRECATED + region: "wherever" + per_tenant_override_config: ./overrides.yaml # DEPRECATED + per_tenant_override_period: 5s # DEPRECATED + allow_deletes: true # DEPRECATED diff --git a/tools/deprecated-config-checker/test-fixtures/runtime-config.yaml b/tools/deprecated-config-checker/test-fixtures/runtime-config.yaml new file mode 100644 index 0000000000000..9500ce719724a --- /dev/null +++ b/tools/deprecated-config-checker/test-fixtures/runtime-config.yaml @@ -0,0 +1,27 @@ +overrides: + "foo": &tenant_overrides + ingestion_rate_mb: 100 + unordered_writes: true # DEPRECATED + ruler_evaluation_delay_duration: 1m # DEPRECATED + ruler_remote_write_url: "push.123abc.net" # DEPRECATED + ruler_remote_write_timeout: 1m # DEPRECATED + ruler_remote_write_headers: [ "foo", "bar" ] # DEPRECATED + ruler_remote_write_relabel_configs: "foo" # DEPRECATED + ruler_remote_write_queue_capacity: 10 # DEPRECATED + ruler_remote_write_queue_min_shards: 10 # DEPRECATED + ruler_remote_write_queue_max_shards: 100 # DEPRECATED + ruler_remote_write_queue_max_samples_per_send: 50 # DEPRECATED + ruler_remote_write_queue_batch_send_deadline: 10m # DEPRECATED + ruler_remote_write_queue_min_backoff: 1m # DEPRECATED + ruler_remote_write_queue_max_backoff: 5m # DEPRECATED + ruler_remote_write_queue_retry_on_ratelimit: true # DEPRECATED + ruler_remote_write_sigv4_config: # DEPRECATED + region: "wherever" + per_tenant_override_config: ./overrides.yaml # DEPRECATED + per_tenant_override_period: 5s # DEPRECATED + allow_deletes: true # DEPRECATED + "bar": *tenant_overrides + +multi_kv_config: + primary: "" + mirror_enabled: null From 9d9b05ad408876d51a773ca68f926c0da712259d Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Fri, 20 Oct 2023 14:41:50 -0300 Subject: [PATCH 08/33] Bloomgateway: Use RUnlock to unlock RLock instead (#10984) **What this PR does / why we need it**: Change `Len()` method to use `RUnlock()` instead, since it was locked using `RLock()`. --- pkg/bloomgateway/bloomgateway.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index 2b920e270b694..a5b081c9a95d5 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -141,7 +141,7 @@ type pendingTasks SyncMap[ulid.ULID, Task] func (t *pendingTasks) Len() int { t.RLock() - defer t.Unlock() + defer t.RUnlock() return len(t.Map) } From b43e2539256cb2fe499c6f586b1231cc1a5e40d1 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Sun, 22 Oct 2023 21:28:21 +0200 Subject: [PATCH 09/33] chore(ksonnet): Simplify configuration of ingester deployment (#10542) Simplify deployment with ksonnet: Remove `stateful_ingesters` flag, because ingesters should always be deployed as StatefulSet with WAL (write ahead log) enabled. 
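
For reference, the WAL settings that the library now always renders look roughly like this in the resulting Loki configuration (a sketch assembled from the `config.libsonnet` defaults added in this change; the replay ceiling is just the default, size it for your environment):

```yaml
ingester:
  wal:
    enabled: true
    dir: /loki/wal
    # should be set up to ~50% of available memory
    replay_memory_ceiling: 7GB
```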
Signed-off-by: Christian Haudum --- CHANGELOG.md | 2 + .../ksonnet/enterprise-logs/main.libsonnet | 1 - production/ksonnet/loki/config.libsonnet | 22 ++- production/ksonnet/loki/ingester.libsonnet | 63 +++---- production/ksonnet/loki/loki.libsonnet | 3 - production/ksonnet/loki/multi-zone.libsonnet | 162 ++++++++---------- .../ksonnet/loki/rollout-operator.libsonnet | 70 ++++++++ production/ksonnet/loki/shipper.libsonnet | 2 - production/ksonnet/loki/wal.libsonnet | 45 ----- 9 files changed, 180 insertions(+), 190 deletions(-) create mode 100644 production/ksonnet/loki/rollout-operator.libsonnet delete mode 100644 production/ksonnet/loki/wal.libsonnet diff --git a/CHANGELOG.md b/CHANGELOG.md index 564d05a4929cf..247af303efc57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,8 @@ * [10784](https://github.com/grafana/loki/pull/10894) **slim-bean** Update index gateway client to use a headless service. +* [10542](https://github.com/grafana/loki/pull/10542) **chaudum**: Remove legacy deployment mode for ingester (Deployment, without WAL) and instead always run them as StatefulSet. + ## 2.9.2 (2023-10-16) ### All Changes diff --git a/production/ksonnet/enterprise-logs/main.libsonnet b/production/ksonnet/enterprise-logs/main.libsonnet index bbdb8ac9ba08a..ba83140f21a1f 100644 --- a/production/ksonnet/enterprise-logs/main.libsonnet +++ b/production/ksonnet/enterprise-logs/main.libsonnet @@ -62,7 +62,6 @@ loki { }, ingester_pvc_size: '50Gi', - stateful_ingesters: true, querier_pvc_size: '50Gi', stateful_queriers: true, diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index 108ffc3292731..8450e524fd1ee 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -12,16 +12,20 @@ grpc_server_max_msg_size: 100 << 20, // 100MB - wal_enabled: true, query_scheduler_enabled: false, overrides_exporter_enabled: false, - // flags for running ingesters/queriers as a statefulset instead of deployment type. - // WAL enabled configurations automatically use statefulsets. 
-  stateful_ingesters: false,
   ingester_pvc_size: '10Gi',
   ingester_pvc_class: 'fast',
+  ingester_data_disk_size: self.ingester_pvc_size,  // keep backwards compatibility
+  ingester_data_disk_class: self.ingester_pvc_class,  // keep backwards compatibility
+
+  ingester_wal_disk_size: '150Gi',
+  ingester_wal_disk_class: 'fast',
+
+  ingester_allow_multiple_replicas_on_same_node: false,
+
   stateful_queriers: false,
   querier_pvc_size: '10Gi',
   querier_pvc_class: 'fast',
@@ -80,10 +84,6 @@
     topology_spread_max_skew: 1,
   },
 
-  ingester_allow_multiple_replicas_on_same_node: false,
-  ingester_data_disk_size: '10Gi',
-  ingester_data_disk_class: 'fast',
-
   // Bigtable variables
   bigtable_instance: error 'must specify bigtable instance',
   bigtable_project: error 'must specify bigtable project',
@@ -231,6 +231,12 @@
       chunk_idle_period: '15m',
       chunk_block_size: 262144,
 
+      wal+: {
+        enabled: true,
+        dir: '/loki/wal',
+        replay_memory_ceiling: '7GB', // should be set up to ~50% of available memory
+      },
+
       lifecycler: {
         ring: {
           heartbeat_timeout: '1m',
diff --git a/production/ksonnet/loki/ingester.libsonnet b/production/ksonnet/loki/ingester.libsonnet
index abdca90ee1fff..cd0d14ba29b87 100644
--- a/production/ksonnet/loki/ingester.libsonnet
+++ b/production/ksonnet/loki/ingester.libsonnet
@@ -4,22 +4,26 @@ local k = import 'ksonnet-util/kausal.libsonnet';
   local pvc = k.core.v1.persistentVolumeClaim,
   local volumeMount = k.core.v1.volumeMount,
   local statefulSet = k.apps.v1.statefulSet,
+  local podDisruptionBudget = k.policy.v1.podDisruptionBudget,
+
+  local name = 'ingester',
 
   // The ingesters should persist TSDB blocks and WAL on a persistent
   // volume in order to be crash resilient.
-  local ingester_data_pvc =
-    pvc.new() +
+  ingester_data_pvc::
+    pvc.new('ingester-data') +
     pvc.mixin.spec.resources.withRequests({ storage: $._config.ingester_data_disk_size }) +
     pvc.mixin.spec.withAccessModes(['ReadWriteOnce']) +
-    pvc.mixin.spec.withStorageClassName($._config.ingester_data_disk_class) +
-    pvc.mixin.metadata.withName('ingester-data'),
+    pvc.mixin.spec.withStorageClassName($._config.ingester_data_disk_class),
 
+  ingester_wal_pvc::
+    pvc.new('ingester-wal') +
+    pvc.mixin.spec.resources.withRequests({ storage: $._config.ingester_wal_disk_size }) +
+    pvc.mixin.spec.withAccessModes(['ReadWriteOnce']) +
+    pvc.mixin.spec.withStorageClassName($._config.ingester_wal_disk_class),
 
-  newIngesterStatefulSet(name, container, with_anti_affinity=true)::
-    // local ingesterContainer = container + $.core.v1.container.withVolumeMountsMixin([
-    //   volumeMount.new('ingester-data', '/data'),
-    // ]);
-
-    $.newLokiStatefulSet(name, 3, container, ingester_data_pvc) +
+  newIngesterStatefulSet(name, container, with_anti_affinity=true)::
+    $.newLokiStatefulSet(name, 3, container, [self.ingester_data_pvc, self.ingester_wal_pvc]) +
     // When the ingester needs to flush blocks to the storage, it may take quite a lot of time.
     // For this reason, we grant a high termination period (80 minutes).
statefulSet.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800) + @@ -42,42 +46,19 @@ local k = import 'ksonnet-util/kausal.libsonnet'; container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port) + container.mixin.readinessProbe.withInitialDelaySeconds(15) + container.mixin.readinessProbe.withTimeoutSeconds(1) + - k.util.resourcesRequests('1', '5Gi') + - k.util.resourcesLimits('2', '10Gi') + + k.util.resourcesRequests('1', '7Gi') + + k.util.resourcesLimits('2', '14Gi') + container.withEnvMixin($._config.commonEnvs) + - if $._config.stateful_ingesters then - container.withVolumeMountsMixin([ - volumeMount.new('ingester-data', '/data'), - ]) else {}, + container.withVolumeMountsMixin([ + volumeMount.new('ingester-data', '/data'), + volumeMount.new('ingester-wal', $._config.loki.ingester.wal.dir), + ]), - local deployment = k.apps.v1.deployment, - - local name = 'ingester', - - ingester_deployment: if !$._config.stateful_ingesters then - deployment.new(name, 3, [$.ingester_container]) + - $.config_hash_mixin + - k.util.configVolumeMount('loki', '/etc/loki/config') + - k.util.configVolumeMount( - $._config.overrides_configmap_mount_name, - $._config.overrides_configmap_mount_path, - ) + - k.util.antiAffinity + - deployment.mixin.spec.withMinReadySeconds(60) + - deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) + - deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1) + - deployment.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800) - else {}, - - ingester_statefulset: self.newIngesterStatefulSet('ingester', $.ingester_container, !$._config.ingester_allow_multiple_replicas_on_same_node), + ingester_statefulset: + self.newIngesterStatefulSet('ingester', $.ingester_container, !$._config.ingester_allow_multiple_replicas_on_same_node), ingester_service: - if !$._config.stateful_ingesters then - k.util.serviceFor($.ingester_deployment, $._config.service_ignored_labels) - else - k.util.serviceFor($.ingester_statefulset, $._config.service_ignored_labels), - - local podDisruptionBudget = k.policy.v1.podDisruptionBudget, + k.util.serviceFor($.ingester_statefulset, $._config.service_ignored_labels), ingester_pdb: podDisruptionBudget.new('loki-ingester-pdb') + diff --git a/production/ksonnet/loki/loki.libsonnet b/production/ksonnet/loki/loki.libsonnet index 3179248e222d4..199fb9e757f6d 100644 --- a/production/ksonnet/loki/loki.libsonnet +++ b/production/ksonnet/loki/loki.libsonnet @@ -21,9 +21,6 @@ (import 'memcached.libsonnet') + (import 'overrides-exporter.libsonnet') + -// WAL support -(import 'wal.libsonnet') + - // Index Gateway support (import 'index-gateway.libsonnet') + diff --git a/production/ksonnet/loki/multi-zone.libsonnet b/production/ksonnet/loki/multi-zone.libsonnet index 83c132549fe10..606f70099d0f8 100644 --- a/production/ksonnet/loki/multi-zone.libsonnet +++ b/production/ksonnet/loki/multi-zone.libsonnet @@ -1,22 +1,18 @@ +local rolloutOperator = import 'rollout-operator.libsonnet'; + { local container = $.core.v1.container, local deployment = $.apps.v1.deployment, local statefulSet = $.apps.v1.statefulSet, + local topologySpreadConstraints = $.core.v1.topologySpreadConstraint, local podDisruptionBudget = $.policy.v1.podDisruptionBudget, local volume = $.core.v1.volume, - local roleBinding = $.rbac.v1.roleBinding, - local role = $.rbac.v1.role, local service = $.core.v1.service, - local serviceAccount = $.core.v1.serviceAccount, - local servicePort = $.core.v1.servicePort, - local policyRule = $.rbac.v1.policyRule, - 
local podAntiAffinity = deployment.mixin.spec.template.spec.affinity.podAntiAffinity, + local pvc = $.core.v1.persistentVolumeClaim, - _images+:: { - rollout_operator: 'grafana/rollout-operator:v0.1.1', - }, + local podAntiAffinity = deployment.mixin.spec.template.spec.affinity.podAntiAffinity, - _config+: { + _config+:: { multi_zone_ingester_enabled: true, multi_zone_ingester_migration_enabled: false, multi_zone_ingester_replication_write_path_enabled: true, @@ -25,6 +21,17 @@ multi_zone_ingester_max_unavailable: std.max(1, std.floor($._config.multi_zone_ingester_replicas / 9)), multi_zone_default_ingester_zone: false, multi_zone_ingester_exclude_default: false, + multi_zone_ingester_name_prefix: 'ingester-zone', + + // If use_topology_spread is true, ingesters can run on nodes already running ingesters but will be + // spread through the available nodes using a TopologySpreadConstraints with a max skew + // of topology_spread_max_skew. + // See: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + // If use_topology_spread is false, ingesters will not be scheduled on nodes already running ingesters. + multi_zone_ingester_use_topology_spread: false, + multi_zone_ingester_topology_spread_max_skew: 1, + + node_selector: null, }, // Zone-aware replication. @@ -49,16 +56,43 @@ ingester_zone_b_args:: {}, ingester_zone_c_args:: {}, - - // For migration purposes we need to be able to configure a zone for single ingester statefulset deployments. ingester_container+:: if !$._config.multi_zone_default_ingester_zone then {} else container.withArgs($.util.mapToFlags($.ingester_args { 'ingester.availability-zone': 'zone-default', })), - newIngesterZoneContainer(zone, zone_args):: - local zone_name = 'zone-%s' % zone; + // remove after upstream PR is merged and is in a K release + // functions for k8s objects + newLokiPdb(deploymentName, maxUnavailable=1):: + local pdbName = '%s-pdb' % deploymentName; + + podDisruptionBudget.new() + + podDisruptionBudget.mixin.metadata.withName(pdbName) + + podDisruptionBudget.mixin.metadata.withLabels({ name: pdbName }) + + podDisruptionBudget.mixin.spec.selector.withMatchLabels({ name: deploymentName }) + + podDisruptionBudget.mixin.spec.withMaxUnavailable(maxUnavailable), + + newIngesterPdb(ingesterName):: + $.newLokiPdb(ingesterName), + + newLokiStatefulSet(name, replicas, container, pvc, podManagementPolicy='Parallel'):: + statefulSet.new(name, replicas, container, pvc) + + statefulSet.mixin.spec.withServiceName(name) + + statefulSet.mixin.spec.template.metadata.withLabels({ name: name }) + + statefulSet.mixin.spec.selector.withMatchLabels({ name: name }) + + statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001) + // 10001 is the group ID assigned to Loki in the Dockerfile + statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate') + + $.config_hash_mixin + + (if podManagementPolicy != null then statefulSet.mixin.spec.withPodManagementPolicy(podManagementPolicy) else {}) + + (if !std.isObject($._config.node_selector) then {} else statefulSet.mixin.spec.template.spec.withNodeSelectorMixin($._config.node_selector)) + + $.util.configVolumeMount('loki', '/etc/loki/config') + + $.util.configVolumeMount( + $._config.overrides_configmap_mount_name, + $._config.overrides_configmap_mount_path, + ), + + newIngesterZoneContainer(zone, zone_args):: $.ingester_container + container.withArgs($.util.mapToFlags( $.ingester_args + zone_args + { @@ -67,24 +101,31 @@ )), newIngesterZoneStatefulSet(zone, container):: - local 
name = 'ingester-zone-%s' % zone; + local name = '%(prefix)s-%(zone)s' % { prefix: $._config.multi_zone_ingester_name_prefix, zone: zone }; - // We can turn off anti-affinity for zone aware statefulsets since it's safe to - // deploy multiple ingesters from the same zone on the same node. - $.newIngesterStatefulSet(name, container, with_anti_affinity=false) + + self.newIngesterStatefulSet(name, container, with_anti_affinity=false) + statefulSet.mixin.metadata.withLabels({ 'rollout-group': 'ingester' }) + statefulSet.mixin.metadata.withAnnotations({ 'rollout-max-unavailable': std.toString($._config.multi_zone_ingester_max_unavailable) }) + statefulSet.mixin.spec.template.metadata.withLabels({ name: name, 'rollout-group': 'ingester' }) + statefulSet.mixin.spec.selector.withMatchLabels({ name: name, 'rollout-group': 'ingester' }) + statefulSet.mixin.spec.updateStrategy.withType('OnDelete') + - statefulSet.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800) + - statefulSet.spec.withVolumeClaimTemplatesMixin($.ingester_wal_pvc) + statefulSet.mixin.spec.withReplicas(std.ceil($._config.multi_zone_ingester_replicas / 3)) + + ( + if $._config.multi_zone_ingester_use_topology_spread then + statefulSet.spec.template.spec.withTopologySpreadConstraints( + // Evenly spread queriers among available nodes. + topologySpreadConstraints.labelSelector.withMatchLabels({ name: name }) + + topologySpreadConstraints.withTopologyKey('kubernetes.io/hostname') + + topologySpreadConstraints.withWhenUnsatisfiable('ScheduleAnyway') + + topologySpreadConstraints.withMaxSkew($._config.multi_zone_ingester_topology_spread_max_skew), + ) + else {} + ) + (if !std.isObject($._config.node_selector) then {} else statefulSet.mixin.spec.template.spec.withNodeSelectorMixin($._config.node_selector)) + if $._config.ingester_allow_multiple_replicas_on_same_node then {} else { spec+: // Allow to schedule 2+ ingesters in the same zone on the same node, but do not schedule 2+ ingesters in - // different zones on the samee node. In case of 1 node failure in the Kubernetes cluster, only ingesters + // different zones on the same node. In case of 1 node failure in the Kubernetes cluster, only ingesters // in 1 zone will be affected. podAntiAffinity.withRequiredDuringSchedulingIgnoredDuringExecution([ podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecutionType.new() + @@ -104,40 +145,39 @@ $.util.serviceFor(sts, $._config.service_ignored_labels) + service.mixin.spec.withClusterIp('None'), // Headless. 
- ingester_zone_a_container:: if !$._config.multi_zone_ingester_enabled then {} else + ingester_zone_a_container:: if !$._config.multi_zone_ingester_enabled then null else self.newIngesterZoneContainer('a', $.ingester_zone_a_args), ingester_zone_a_statefulset: if !$._config.multi_zone_ingester_enabled then {} else self.newIngesterZoneStatefulSet('a', $.ingester_zone_a_container), - ingester_zone_a_service: if !$._config.multi_zone_ingester_enabled then {} else + ingester_zone_a_service: if !$._config.multi_zone_ingester_enabled then null else $.newIngesterZoneService($.ingester_zone_a_statefulset), - ingester_zone_b_container:: if !$._config.multi_zone_ingester_enabled then {} else + ingester_zone_b_container:: if !$._config.multi_zone_ingester_enabled then null else self.newIngesterZoneContainer('b', $.ingester_zone_b_args), ingester_zone_b_statefulset: if !$._config.multi_zone_ingester_enabled then {} else self.newIngesterZoneStatefulSet('b', $.ingester_zone_b_container), - ingester_zone_b_service: if !$._config.multi_zone_ingester_enabled then {} else + ingester_zone_b_service: if !$._config.multi_zone_ingester_enabled then null else $.newIngesterZoneService($.ingester_zone_b_statefulset), - ingester_zone_c_container:: if !$._config.multi_zone_ingester_enabled then {} else + ingester_zone_c_container:: if !$._config.multi_zone_ingester_enabled then null else self.newIngesterZoneContainer('c', $.ingester_zone_c_args), ingester_zone_c_statefulset: if !$._config.multi_zone_ingester_enabled then {} else self.newIngesterZoneStatefulSet('c', $.ingester_zone_c_container), - ingester_zone_c_service: if !$._config.multi_zone_ingester_enabled then {} else + ingester_zone_c_service: if !$._config.multi_zone_ingester_enabled then null else $.newIngesterZoneService($.ingester_zone_c_statefulset), - ingester_rollout_pdb: if !$._config.multi_zone_ingester_enabled then {} else + ingester_rollout_pdb: if !$._config.multi_zone_ingester_enabled then null else podDisruptionBudget.new('ingester-rollout-pdb') + podDisruptionBudget.mixin.metadata.withLabels({ name: 'ingester-rollout-pdb' }) + podDisruptionBudget.mixin.spec.selector.withMatchLabels({ 'rollout-group': 'ingester' }) + podDisruptionBudget.mixin.spec.withMaxUnavailable(1), - // Single-zone ingesters shouldn't be configured when multi-zone is enabled. ingester_statefulset: // Remove the default "ingester" StatefulSet if multi-zone is enabled and no migration is in progress. if $._config.multi_zone_ingester_enabled && !$._config.multi_zone_ingester_migration_enabled @@ -147,7 +187,7 @@ ingester_service: // Remove the default "ingester" service if multi-zone is enabled and no migration is in progress. if $._config.multi_zone_ingester_enabled && !$._config.multi_zone_ingester_migration_enabled - then {} + then null else super.ingester_service, ingester_pdb: @@ -158,65 +198,7 @@ else if $._config.multi_zone_ingester_migration_enabled then super.ingester_pdb + podDisruptionBudget.mixin.spec.withMaxUnavailable(0) // Remove it if multi-zone is enabled and no migration is in progress. - else {}, - - // Rollout operator. 
- local rollout_operator_enabled = $._config.multi_zone_ingester_enabled, - - rollout_operator_args:: { - 'kubernetes.namespace': $._config.namespace, - }, - - rollout_operator_container:: - container.new('rollout-operator', $._images.rollout_operator) + - container.withArgsMixin($.util.mapToFlags($.rollout_operator_args)) + - container.withPorts([ - $.core.v1.containerPort.new('http-metrics', 8001), - ]) + - $.util.resourcesRequests('100m', '100Mi') + - $.util.resourcesLimits('1', '200Mi') + - container.mixin.readinessProbe.httpGet.withPath('/ready') + - container.mixin.readinessProbe.httpGet.withPort(8001) + - container.mixin.readinessProbe.withInitialDelaySeconds(5) + - container.mixin.readinessProbe.withTimeoutSeconds(1), - - rollout_operator_deployment: if !rollout_operator_enabled then {} else - deployment.new('rollout-operator', 1, [$.rollout_operator_container]) + - deployment.mixin.metadata.withName('rollout-operator') + - deployment.mixin.spec.template.spec.withServiceAccountName('rollout-operator') + - // Ensure Kubernetes doesn't run 2 operators at the same time. - deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) + - deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1), - - rollout_operator_role: if !rollout_operator_enabled then {} else - role.new('rollout-operator-role') + - role.mixin.metadata.withNamespace($._config.namespace) + - role.withRulesMixin([ - policyRule.withApiGroups('') + - policyRule.withResources(['pods']) + - policyRule.withVerbs(['list', 'get', 'watch', 'delete']), - policyRule.withApiGroups('apps') + - policyRule.withResources(['statefulsets']) + - policyRule.withVerbs(['list', 'get', 'watch']), - policyRule.withApiGroups('apps') + - policyRule.withResources(['statefulsets/status']) + - policyRule.withVerbs(['update']), - ]), - - rollout_operator_rolebinding: if !rollout_operator_enabled then {} else - roleBinding.new('rollout-operator-rolebinding') + - roleBinding.mixin.metadata.withNamespace($._config.namespace) + - roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') + - roleBinding.mixin.roleRef.withKind('Role') + - roleBinding.mixin.roleRef.withName('rollout-operator-role') + - roleBinding.withSubjectsMixin({ - kind: 'ServiceAccount', - name: 'rollout-operator', - namespace: $._config.namespace, - }), - - rollout_operator_service_account: if !rollout_operator_enabled then {} else - serviceAccount.new('rollout-operator'), + else null, } + { distributor_args+:: if $._config.multi_zone_ingester_exclude_default then { 'distributor.excluded-zones': 'zone-default', @@ -225,4 +207,4 @@ ruler_args+:: if $._config.multi_zone_ingester_exclude_default then { 'distributor.excluded-zones': 'zone-default', } else {}, -} +} + rolloutOperator diff --git a/production/ksonnet/loki/rollout-operator.libsonnet b/production/ksonnet/loki/rollout-operator.libsonnet new file mode 100644 index 0000000000000..5bfed7f1dcc89 --- /dev/null +++ b/production/ksonnet/loki/rollout-operator.libsonnet @@ -0,0 +1,70 @@ +{ + local container = $.core.v1.container, + local deployment = $.apps.v1.deployment, + local policyRule = $.rbac.v1.policyRule, + local roleBinding = $.rbac.v1.roleBinding, + local role = $.rbac.v1.role, + local service = $.core.v1.service, + local serviceAccount = $.core.v1.serviceAccount, + + _images+:: { + rollout_operator: 'grafana/rollout-operator:v0.1.1', + }, + + rollout_operator_args:: { + 'kubernetes.namespace': $._config.namespace, + }, + + local rollout_operator_enabled = $._config.multi_zone_ingester_enabled, + + 
rollout_operator_container:: + container.new('rollout-operator', $._images.rollout_operator) + + container.withArgsMixin($.util.mapToFlags($.rollout_operator_args)) + + container.withPorts([ + $.core.v1.containerPort.new('http-metrics', 8001), + ]) + + $.util.resourcesRequests('100m', '100Mi') + + $.util.resourcesLimits('1', '200Mi') + + container.mixin.readinessProbe.httpGet.withPath('/ready') + + container.mixin.readinessProbe.httpGet.withPort(8001) + + container.mixin.readinessProbe.withInitialDelaySeconds(5) + + container.mixin.readinessProbe.withTimeoutSeconds(1), + + rollout_operator_deployment: if !rollout_operator_enabled then {} else + deployment.new('rollout-operator', 1, [$.rollout_operator_container]) + + deployment.mixin.metadata.withName('rollout-operator') + + deployment.mixin.spec.template.spec.withServiceAccountName('rollout-operator') + + // Ensure Kubernetes doesn't run 2 operators at the same time. + deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) + + deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1), + + rollout_operator_role: if !rollout_operator_enabled then null else + role.new('rollout-operator-role') + + role.mixin.metadata.withNamespace($._config.namespace) + + role.withRulesMixin([ + policyRule.withApiGroups('') + + policyRule.withResources(['pods']) + + policyRule.withVerbs(['list', 'get', 'watch', 'delete']), + policyRule.withApiGroups('apps') + + policyRule.withResources(['statefulsets']) + + policyRule.withVerbs(['list', 'get', 'watch', 'update', 'patch']), + policyRule.withApiGroups('apps') + + policyRule.withResources(['statefulsets/status']) + + policyRule.withVerbs(['update']), + ]), + + rollout_operator_rolebinding: if !rollout_operator_enabled then null else + roleBinding.new('rollout-operator-rolebinding') + + roleBinding.mixin.metadata.withNamespace($._config.namespace) + + roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') + + roleBinding.mixin.roleRef.withKind('Role') + + roleBinding.mixin.roleRef.withName('rollout-operator-role') + + roleBinding.withSubjectsMixin({ + kind: 'ServiceAccount', + name: 'rollout-operator', + namespace: $._config.namespace, + }), + + rollout_operator_service_account: if !rollout_operator_enabled then null else + serviceAccount.new('rollout-operator'), +} diff --git a/production/ksonnet/loki/shipper.libsonnet b/production/ksonnet/loki/shipper.libsonnet index 282f723edeefd..40f9bc4ae1dd7 100644 --- a/production/ksonnet/loki/shipper.libsonnet +++ b/production/ksonnet/loki/shipper.libsonnet @@ -16,8 +16,6 @@ boltdb_shipper_shared_store: error 'must define boltdb_shipper_shared_store when using_boltdb_shipper=true. If this is not intentional, consider disabling it. shared_store is a backend key from the storage_config, such as (gcs) or (s3)', tsdb_shipper_shared_store: error 'must define tsdb_shipper_shared_store when using_tsdb_shipper=true. If this is not intentional, consider disabling it. shared_store is a backend key from the storage_config, such as (gcs) or (s3)', - // run ingesters and queriers as statefulsets when using boltdb-shipper to avoid using node disk for storing the index. 
- stateful_ingesters: if self.using_shipper_store then true else super.stateful_ingesters, stateful_queriers: if self.using_shipper_store && !self.use_index_gateway then true else super.stateful_queriers, compactor_pvc_size: '10Gi', diff --git a/production/ksonnet/loki/wal.libsonnet b/production/ksonnet/loki/wal.libsonnet deleted file mode 100644 index 831f5405b1d99..0000000000000 --- a/production/ksonnet/loki/wal.libsonnet +++ /dev/null @@ -1,45 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -{ - local with(x) = if $._config.wal_enabled then x else {}, - - _config+:: { - stateful_ingesters: if $._config.wal_enabled then true else super.stateful_ingesters, - loki+: with({ - ingester+: { - wal+: { - enabled: true, - dir: '/loki/wal', - replay_memory_ceiling: '7GB', // should be set upto ~50% of available memory - }, - }, - }), - }, - - local pvc = k.core.v1.persistentVolumeClaim, - - ingester_wal_pvc:: with( - pvc.new('ingester-wal') + - pvc.mixin.spec.resources.withRequests({ storage: '150Gi' }) + - pvc.mixin.spec.withAccessModes(['ReadWriteOnce']) + - pvc.mixin.spec.withStorageClassName($._config.ingester_pvc_class) - ), - - local container = k.core.v1.container, - local volumeMount = k.core.v1.volumeMount, - - ingester_container+:: with( - k.util.resourcesRequests('1', '7Gi') + - k.util.resourcesLimits('2', '14Gi') + - container.withVolumeMountsMixin([ - volumeMount.new('ingester-wal', $._config.loki.ingester.wal.dir), - ]), - ), - - - local statefulSet = k.apps.v1.statefulSet, - ingester_statefulset+: with( - statefulSet.spec.withVolumeClaimTemplatesMixin($.ingester_wal_pvc), - ), - -} From 1b0b9da3963d804a3cc1b5da6a24a411b9a68a36 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Mon, 23 Oct 2023 05:11:02 +0200 Subject: [PATCH 10/33] Recover querier handler from panic. 
(#10983) --- pkg/loki/modules.go | 2 +- pkg/util/server/recovery.go | 15 +++++++++++++++ pkg/util/server/recovery_test.go | 9 +++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index b1874da090b8f..432bbd5d51938 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -507,7 +507,7 @@ func (t *Loki) initQuerier() (services.Service, error) { svc, err := querier.InitWorkerService( querierWorkerServiceConfig, prometheus.DefaultRegisterer, - handler, + serverutil.RecoveryMiddleware.Wrap(handler), t.Codec, ) if err != nil { diff --git a/pkg/util/server/recovery.go b/pkg/util/server/recovery.go index 3068a41347b02..4c0155e16db23 100644 --- a/pkg/util/server/recovery.go +++ b/pkg/util/server/recovery.go @@ -1,6 +1,7 @@ package server import ( + "context" "fmt" "net/http" "os" @@ -11,6 +12,8 @@ import ( grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" ) const maxStacksize = 8 * 1024 @@ -34,6 +37,18 @@ var ( }) RecoveryGRPCStreamInterceptor = grpc_recovery.StreamServerInterceptor(grpc_recovery.WithRecoveryHandler(onPanic)) RecoveryGRPCUnaryInterceptor = grpc_recovery.UnaryServerInterceptor(grpc_recovery.WithRecoveryHandler(onPanic)) + + RecoveryMiddleware queryrangebase.Middleware = queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { + return queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (res queryrangebase.Response, err error) { + defer func() { + if p := recover(); p != nil { + err = onPanic(p) + } + }() + res, err = next.Do(ctx, req) + return + }) + }) ) func onPanic(p interface{}) error { diff --git a/pkg/util/server/recovery_test.go b/pkg/util/server/recovery_test.go index be9c4ff3ade78..3a98b01b1beb4 100644 --- a/pkg/util/server/recovery_test.go +++ b/pkg/util/server/recovery_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" ) func Test_onPanic(t *testing.T) { @@ -32,6 +34,13 @@ func Test_onPanic(t *testing.T) { panic("foo") })) require.Error(t, err) + + _, err = RecoveryMiddleware. + Wrap(queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (res queryrangebase.Response, err error) { + panic("foo") + })). 
+ Do(context.Background(), nil) + require.ErrorContains(t, err, "foo") } type fakeStream struct{} From c19f19f97491c5f8ed403c1a0a3a92f7c20c759c Mon Sep 17 00:00:00 2001 From: Sylvain Witmeyer Date: Mon, 23 Oct 2023 02:48:45 -0400 Subject: [PATCH 11/33] docs: Update sampling.md (#10987) **What this PR does / why we need it**: Fix parsing error in the doc **Special notes for your reviewer**: **Checklist** - [X] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. --- docs/sources/send-data/promtail/stages/sampling.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/send-data/promtail/stages/sampling.md b/docs/sources/send-data/promtail/stages/sampling.md index 5c7c68f42d729..00127b431dd57 100644 --- a/docs/sources/send-data/promtail/stages/sampling.md +++ b/docs/sources/send-data/promtail/stages/sampling.md @@ -47,7 +47,7 @@ pipeline_stages: app: - match: pipeline_name: "app2" - selector: "{app="poki"}" + selector: '{app="poki"}' stages: - sampling: rate: 0.1 From 32f0dbc2a53bdda0547e8c20925748eb41c1bb2c Mon Sep 17 00:00:00 2001 From: Kevin Burke Date: Mon, 23 Oct 2023 01:56:48 -0700 Subject: [PATCH 12/33] promtail/client: note client config is not deprecated (#10705) **What this PR does / why we need it**: Because the block of code immediately below `*Config` indicates that all arguments are deprecated, I incorrectly assumed that the `*Config` itself was deprecated and went hunting for a replacement. But the `*Config` struct is still how a client is expected to be configured. **Which issue(s) this PR fixes**: Updates #10702. **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [x] Tests updated - [x] `CHANGELOG.md` updated - [x] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [x] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [x] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) --- clients/pkg/promtail/client/config.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clients/pkg/promtail/client/config.go b/clients/pkg/promtail/client/config.go index c41e63ff28b58..ab36353ba4903 100644 --- a/clients/pkg/promtail/client/config.go +++ b/clients/pkg/promtail/client/config.go @@ -23,6 +23,10 @@ const ( // Config describes configuration for an HTTP pusher client. 
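+	// A hedged usage sketch: the struct itself is not deprecated, only the
+	// flag-based setup is, so constructing it directly remains supported.
+	// Field names come from the struct below; the values are illustrative:
+	//
+	//	cfg := client.Config{
+	//		Name:      "primary",
+	//		BatchWait: time.Second,
+	//	}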
 type Config struct {
+	// Note: even though the command-line flag arguments which use this config
+	// are deprecated, this struct is still the primary way to configure
+	// a promtail client.
+
 	Name      string        `yaml:"name,omitempty"`
 	URL       flagext.URLValue
 	BatchWait time.Duration `yaml:"batchwait"`

From 3f28e6362da86587778607ccc2c355232318551b Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Mon, 23 Oct 2023 11:40:05 +0200
Subject: [PATCH 13/33] Fix flaky bloomgateway test case (#11001)

The order of active users is non-deterministic, so we need to check for
matching elements rather than for slice equality.

Failed test run: https://drone.grafana.net/grafana/loki/29579/3/6
Passed test run: https://drone.grafana.net/grafana/loki/29577/3/6

Signed-off-by: Christian Haudum
---
 pkg/bloomgateway/bloomgateway_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index bfb2b9b9d8e21..c0d9ffdfae230 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -245,6 +245,6 @@
 			_, err = gw.FilterChunkRefs(ctx, req)
 			require.NoError(t, err)
 		}
-		require.Equal(t, tenants, gw.activeUsers.ActiveUsers())
+		require.ElementsMatch(t, tenants, gw.activeUsers.ActiveUsers())
 	})
 }

From 6069df8f7d17ffce07f4dc5351380ce3a6cef791 Mon Sep 17 00:00:00 2001
From: Sandeep Sukhani
Date: Mon, 23 Oct 2023 15:23:36 +0530
Subject: [PATCH 14/33] ingestion: native otlp ingestion support (#10727)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**What this PR does / why we need it**:
Add native support for ingesting logs in OTLP format. `/otlp/v1/logs` is the new endpoint where users can push logs in OTLP format. It accepts logs serialized in JSON or proto format.
Since the OTLP format is very different from the Loki storage model, here is how data in OTLP format will be mapped to the Loki data model (a short sketch follows this list):
* Index labels: Resource attributes map quite well to index labels in Loki, since both usually identify the source of the logs. The problem, however, is that resource attributes in OTLP can have an unbounded number of values, while Loki has a default limit of up to 30 labels. Since index labels in Loki largely drive the kind of querying experience users will have, we have chosen a select list of attributes to be picked as index labels. The ones that are not picked as index labels are stored as structured metadata with each log entry.
* Timestamp: LogRecord.TimeUnixNano
* LogLine: LogRecord.Body holds the body of the log. However, since Loki only supports log bodies in string format, we stringify non-string values using the [AsString method from the OTEL collector lib](https://github.com/open-telemetry/opentelemetry-collector/blob/ab3d6c5b64701e690aaa340b0a63f443ff22c1f0/pdata/pcommon/value.go#L353).
* Structured Metadata: Anything which can't be stored in index labels or the log line. Here is a non-exhaustive list of what will be stored in structured metadata, to give a sense of what it will hold:
  * Resource attributes not stored as index labels are replicated and stored with each log entry.
  * Everything under InstrumentationScope is replicated and stored with each log entry.
  * Everything under LogRecord except LogRecord.Body, LogRecord.TimeUnixNano and sometimes LogRecord.ObservedTimestamp.
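A minimal sketch of the resulting mapping, using the same `plog`/`pcommon` API that the integration client in this PR uses. The attribute names and values here are illustrative assumptions; which resource attributes get promoted to index labels is decided by the blessed-attribute list (the integration test below exercises `service.name`, which surfaces as the `service_name` index label):

```go
package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

// buildExample assembles one OTLP log record and notes where each piece
// would land in Loki under this PR.
func buildExample() plog.Logs {
	logs := plog.NewLogs()
	rl := logs.ResourceLogs().AppendEmpty()

	// A blessed resource attribute becomes the index label service_name="checkout"
	// (unsupported characters such as "." are rewritten to "_"); resource
	// attributes outside the blessed list are instead replicated into
	// structured metadata on every entry.
	rl.Resource().Attributes().PutStr("service.name", "checkout")

	lr := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) // -> entry timestamp
	lr.Body().SetStr("order created")                         // -> the log line
	lr.Attributes().PutStr("trace_id", "abc123")              // -> structured metadata
	return logs
}
```

Querying `{service_name="checkout"}` would then return the entry with `trace_id` attached as structured metadata, mirroring the assertions in the integration test added below.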
*NOTES*: * Since Loki does not support `.` or any other special characters other than `_` in label names, we replace all non-supported characters with `_`. * Since Loki only supports string in values of Index Labels and Structured Metadata, all the complex types are converted as follows: * Map would be flattened into label keys using `_` as separator, same as how we do it in [json parser in LogQL](https://grafana.com/docs/loki/latest/query/log_queries/#json). * Everything else is stringified using [AsString method from OTEL collector lib](https://github.com/open-telemetry/opentelemetry-collector/blob/ab3d6c5b64701e690aaa340b0a63f443ff22c1f0/pdata/pcommon/value.go#L353) **Special notes for your reviewer**: I will open follow-up PRs for: * Documentation * Make blessed attributes list configurable per tenant. **Checklist** - [x] Tests updated - [x] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label --- CHANGELOG.md | 1 + .../promtail/targets/lokipush/pushtarget.go | 2 +- go.mod | 20 +- go.sum | 43 +- integration/client/client.go | 76 +- integration/cluster/cluster.go | 4 +- .../loki_micro_services_delete_test.go | 4 +- integration/loki_micro_services_test.go | 169 ++- integration/loki_rule_eval_test.go | 6 +- integration/loki_simple_scalable_test.go | 8 +- integration/loki_single_binary_test.go | 9 +- integration/multi_tenant_queries_test.go | 4 +- integration/per_request_limits_test.go | 2 +- pkg/distributor/http.go | 10 +- pkg/loghttp/push/otlp.go | 370 +++++ pkg/loghttp/push/otlp_test.go | 444 ++++++ pkg/loghttp/push/push.go | 141 +- pkg/loghttp/push/push_test.go | 2 +- pkg/loki/modules.go | 12 +- .../.release-please-manifest-individual.json | 8 +- .../.release-please-manifest-submodules.json | 73 +- .../go/.release-please-manifest.json | 2 +- vendor/cloud.google.com/go/CHANGES.md | 21 + vendor/cloud.google.com/go/CONTRIBUTING.md | 10 +- .../go/compute/internal/version.go | 2 +- vendor/cloud.google.com/go/doc.go | 160 +- .../go/internal/.repo-metadata-full.json | 112 +- vendor/cloud.google.com/go/pubsub/CHANGES.md | 12 + .../go/pubsub/internal/version.go | 2 +- .../go/pubsub/subscription.go | 70 +- ...elease-please-config-yoshi-submodules.json | 15 +- .../least_request/v3/least_request.pb.go | 266 ++++ .../v3/least_request.pb.validate.go | 237 +++ .../pkg/translator/prometheus/LICENSE | 201 +++ .../pkg/translator/prometheus/Makefile | 1 + .../pkg/translator/prometheus/README.md | 115 ++ .../pkg/translator/prometheus/metadata.yaml | 3 + .../translator/prometheus/normalize_label.go | 53 + .../translator/prometheus/normalize_name.go | 280 ++++ .../pkg/translator/prometheus/unit_to_ucum.go | 90 ++ .../collector/featuregate/LICENSE | 202 +++ .../collector/featuregate/Makefile | 1 + .../collector/featuregate/README.md | 77 + .../collector/featuregate/flag.go | 55 + .../collector/featuregate/gate.go | 53 + .../collector/featuregate/registry.go | 150 ++ .../collector/featuregate/stage.go | 44 + .../collector/pdata/plog/encoding.go | 31 + .../pdata/plog/generated_logrecord.go | 148 ++ .../pdata/plog/generated_logrecordslice.go | 143 ++ .../pdata/plog/generated_resourcelogs.go | 70 + .../pdata/plog/generated_resourcelogsslice.go | 143 ++ .../pdata/plog/generated_scopelogs.go | 70 + .../pdata/plog/generated_scopelogsslice.go | 143 ++ .../collector/pdata/plog/json.go | 131 ++ .../collector/pdata/plog/log_record_flags.go | 28 + .../collector/pdata/plog/logs.go | 51 + .../collector/pdata/plog/pb.go | 33 + 
.../generated_exportpartialsuccess.go | 67 + .../collector/pdata/plog/plogotlp/grpc.go | 84 ++ .../collector/pdata/plog/plogotlp/request.go | 71 + .../collector/pdata/plog/plogotlp/response.go | 81 ++ .../collector/pdata/plog/severity_number.go | 96 ++ .../net/http/otelhttp/handler.go | 63 +- .../http/otelhttp/internal/semconvutil/gen.go | 21 - .../otelhttp/internal/semconvutil/httpconv.go | 552 ------- .../net/http/otelhttp/transport.go | 10 +- .../net/http/otelhttp/version.go | 2 +- .../otel/semconv/internal/v2/http.go | 404 ++++++ .../semconv/internal/v2/net.go} | 74 +- .../otel/semconv/v1.17.0/httpconv/http.go | 152 ++ .../admin/v2/bigtable_table_admin.pb.go | 1286 ++++++++++------- .../googleapis/bigtable/admin/v2/table.pb.go | 293 ++-- .../googleapis/bigtable/v2/bigtable.pb.go | 10 +- .../bigtable/v2/feature_flags.pb.go | 76 +- vendor/google.golang.org/grpc/README.md | 58 +- .../grpc/attributes/attributes.go | 59 +- .../grpc/authz/audit/stdout/stdout_logger.go | 2 +- .../grpc/balancer/balancer.go | 47 +- .../grpc/balancer/base/balancer.go | 22 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- .../grpc/balancer/grpclb/grpclb.go | 33 +- .../balancer/grpclb/grpclb_remote_balancer.go | 22 +- .../grpc/balancer/grpclb/grpclb_util.go | 52 +- .../balancer/leastrequest/leastrequest.go | 181 +++ .../balancer/weightedroundrobin/balancer.go | 16 +- .../weightedroundrobin/weightedroundrobin.go | 2 +- .../balancer/weightedtarget/weightedtarget.go | 11 +- .../grpc/balancer_conn_wrappers.go | 75 +- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- vendor/google.golang.org/grpc/call.go | 11 +- vendor/google.golang.org/grpc/clientconn.go | 236 ++- vendor/google.golang.org/grpc/codec.go | 8 +- .../alts/internal/handshaker/handshaker.go | 57 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 2 +- .../grpc_gcp/transport_security_common.pb.go | 2 +- .../tls/certprovider/pemfile/builder.go | 2 +- .../credentials/tls/certprovider/provider.go | 2 +- .../credentials/tls/certprovider/store.go | 8 +- vendor/google.golang.org/grpc/dialoptions.go | 37 + .../grpc/encoding/encoding.go | 4 +- .../grpc/encoding/gzip/gzip.go | 4 +- .../grpc/encoding/proto/proto.go | 4 +- .../grpc/grpclog/component.go | 40 +- .../google.golang.org/grpc/grpclog/grpclog.go | 30 +- .../google.golang.org/grpc/grpclog/logger.go | 30 +- .../grpc/grpclog/loggerv2.go | 56 +- .../grpc/health/grpc_health_v1/health.pb.go | 2 +- vendor/google.golang.org/grpc/interceptor.go | 12 +- .../balancer/gracefulswitch/gracefulswitch.go | 59 +- .../internal/balancergroup/balancergroup.go | 153 +- .../grpc/internal/balancerload/load.go | 4 +- .../grpc/internal/binarylog/method_logger.go | 4 +- .../grpc/internal/buffer/unbounded.go | 18 +- .../grpc/internal/cache/timeoutCache.go | 12 +- .../grpc/internal/channelz/funcs.go | 69 +- .../grpc/internal/channelz/logging.go | 12 +- .../grpc/internal/channelz/types.go | 5 + .../grpc/internal/channelz/util_linux.go | 2 +- .../grpc/internal/channelz/util_nonlinux.go | 2 +- .../grpc/internal/credentials/credentials.go | 8 +- .../credentials/xds/handshake_info.go | 26 +- .../grpc/internal/envconfig/envconfig.go | 12 +- .../grpc/internal/grpclog/grpclog.go | 40 +- .../grpc/internal/grpclog/prefixLogger.go | 8 +- .../grpc/internal/grpcrand/grpcrand.go | 7 + .../internal/grpcsync/callback_serializer.go | 54 +- .../grpc/internal/grpcsync/pubsub.go | 121 ++ .../grpc/internal/hierarchy/hierarchy.go | 2 +- .../grpc/{ => internal/idle}/idle.go | 188 +-- 
.../grpc/internal/internal.go | 45 +- .../grpc/internal/metadata/metadata.go | 2 +- .../grpc/internal/pretty/pretty.go | 2 +- .../internal/proto/grpc_lookup_v1/rls.pb.go | 2 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 2 +- .../grpc/internal/resolver/config_selector.go | 4 +- .../internal/resolver/dns/dns_resolver.go | 74 +- .../grpc/internal/status/status.go | 8 +- .../grpc/internal/transport/controlbuf.go | 16 +- .../grpc/internal/transport/http2_client.go | 45 +- .../grpc/internal/transport/http2_server.go | 19 +- .../grpc/internal/transport/http_util.go | 59 +- .../grpc/internal/transport/transport.go | 17 +- .../grpc/internal/wrr/edf.go | 10 +- .../grpc/internal/wrr/random.go | 6 +- .../grpc/internal/wrr/wrr.go | 4 +- .../grpc/internal/xds/rbac/matchers.go | 6 + .../grpc/orca/call_metrics.go | 8 +- .../grpc/orca/internal/internal.go | 2 +- vendor/google.golang.org/grpc/orca/orca.go | 5 +- .../google.golang.org/grpc/orca/producer.go | 2 +- .../google.golang.org/grpc/picker_wrapper.go | 34 +- vendor/google.golang.org/grpc/pickfirst.go | 88 +- vendor/google.golang.org/grpc/preloader.go | 2 +- .../grpc/resolver/manual/manual.go | 17 +- vendor/google.golang.org/grpc/resolver/map.go | 10 +- .../grpc/resolver/resolver.go | 84 +- .../grpc/resolver_conn_wrapper.go | 10 +- vendor/google.golang.org/grpc/rpc_util.go | 44 +- vendor/google.golang.org/grpc/server.go | 162 ++- .../grpc/shared_buffer_pool.go | 154 ++ vendor/google.golang.org/grpc/stats/stats.go | 14 +- .../google.golang.org/grpc/status/status.go | 14 +- vendor/google.golang.org/grpc/stream.go | 130 +- vendor/google.golang.org/grpc/trace.go | 6 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 7 +- .../grpc/xds/googledirectpath/googlec2p.go | 2 - .../grpc/xds/internal/balancer/balancer.go | 1 + .../balancer/cdsbalancer/cdsbalancer.go | 25 +- .../balancer/clusterimpl/clusterimpl.go | 99 +- .../balancer/clustermanager/clustermanager.go | 11 +- .../clusterresolver/clusterresolver.go | 43 +- .../balancer/clusterresolver/configbuilder.go | 12 +- .../clusterresolver/resource_resolver.go | 51 +- .../clusterresolver/resource_resolver_dns.go | 64 +- .../clusterresolver/resource_resolver_eds.go | 92 +- .../balancer/outlierdetection/balancer.go | 35 +- .../outlierdetection/subconn_wrapper.go | 1 + .../internal/balancer/priority/balancer.go | 14 +- .../xds/internal/balancer/ringhash/picker.go | 15 +- .../internal/balancer/ringhash/ringhash.go | 47 +- .../internal/balancer/wrrlocality/balancer.go | 6 +- .../clusterspecifier/cluster_specifier.go | 2 +- .../xds/internal/httpfilter/fault/fault.go | 4 +- .../grpc/xds/internal/internal.go | 4 +- .../xds/internal/resolver/serviceconfig.go | 22 +- .../grpc/xds/internal/xdsclient/authority.go | 11 +- .../grpc/xds/internal/xdsclient/client.go | 1 - .../xdsclient/clientimpl_authority.go | 3 +- .../internal/xdsclient/clientimpl_watchers.go | 31 - .../grpc/xds/internal/xdsclient/load/store.go | 6 +- .../grpc/xds/internal/xdsclient/logging.go | 12 +- .../xdslbregistry/converter/converter.go | 31 +- .../internal/xdsclient/xdsresource/errors.go | 2 +- .../xdsclient/xdsresource/resource_type.go | 2 +- .../internal/xdsclient/xdsresource/type.go | 2 +- .../xdsclient/xdsresource/type_eds.go | 8 - .../xdsclient/xdsresource/unmarshal_cds.go | 26 + vendor/google.golang.org/grpc/xds/server.go | 123 +- vendor/modules.txt | 36 +- 202 files changed, 8755 insertions(+), 3119 deletions(-) create mode 100644 pkg/loghttp/push/otlp.go create mode 100644 pkg/loghttp/push/otlp_test.go 
create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/LICENSE create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/Makefile create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/README.md create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/metadata.yaml create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_label.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/unit_to_ucum.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/README.md create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/flag.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/gate.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/registry.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/stage.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/json.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/logs.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go rename 
vendor/go.opentelemetry.io/{contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go => otel/semconv/internal/v2/net.go} (72%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go create mode 100644 vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go rename vendor/google.golang.org/grpc/{ => internal/idle}/idle.go (61%) create mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 247af303efc57..c62cf0894d703 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ * [10793](https://github.com/grafana/loki/pull/10793) **ashwanthgoli** Config: Better configuration defaults to provide a better experience for users out of the box. * [10785](https://github.com/grafana/loki/pull/10785) **ashwanthgoli** Config: Removes `querier.worker-parallelism` and updates default value of `querier.max-concurrent` to 4. * [10733](https://github.com/grafana/loki/pull/10733) **shantanualsi** Add support for case-insensitive logql funtions +* [10727](https://github.com/grafana/loki/pull/10727) **sandeepsukhani** Native otlp ingestion support ##### Fixes diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget.go b/clients/pkg/promtail/targets/lokipush/pushtarget.go index 7bd63e47d6de0..7ffe2ddfc9318 100644 --- a/clients/pkg/promtail/targets/lokipush/pushtarget.go +++ b/clients/pkg/promtail/targets/lokipush/pushtarget.go @@ -111,7 +111,7 @@ func (t *PushTarget) run() error { func (t *PushTarget) handleLoki(w http.ResponseWriter, r *http.Request) { logger := util_log.WithContext(r.Context(), util_log.Logger) userID, _ := tenant.TenantID(r.Context()) - req, err := push.ParseRequest(logger, userID, r, nil) + req, err := push.ParseRequest(logger, userID, r, nil, push.ParseLokiRequest) if err != nil { level.Warn(t.logger).Log("msg", "failed to parse incoming push request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) diff --git a/go.mod b/go.mod index 432cd5b97c02b..a7415b9f2262c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( cloud.google.com/go/bigtable v1.18.1 - cloud.google.com/go/pubsub v1.32.0 + cloud.google.com/go/pubsub v1.33.0 cloud.google.com/go/storage v1.30.1 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.14.0 @@ -104,7 +104,7 @@ require ( golang.org/x/sys v0.13.0 golang.org/x/time v0.3.0 google.golang.org/api v0.132.0 - google.golang.org/grpc v1.56.3 + google.golang.org/grpc v1.58.2 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -126,11 +126,13 @@ require ( github.com/grafana/loki/pkg/push v0.0.0-20231017172654-cfc4f0e84adc github.com/heroku/x v0.0.61 github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0 github.com/prometheus/alertmanager v0.26.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/richardartoul/molecule v1.0.0 github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3 github.com/willf/bloom v2.0.3+incompatible + go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b golang.org/x/oauth2 v0.10.0 @@ -140,8 +142,8 @@ require ( ) require ( - 
cloud.google.com/go v0.110.4 // indirect - cloud.google.com/go/compute v1.22.0 // indirect + cloud.google.com/go v0.110.7 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.1 // indirect cloud.google.com/go/longrunning v0.5.1 // indirect @@ -300,9 +302,9 @@ require ( go.etcd.io/etcd/client/v3 v3.5.4 // indirect go.mongodb.org/mongo-driver v1.12.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect + go.opentelemetry.io/collector/featuregate v1.0.0-rcv0015 // indirect go.opentelemetry.io/collector/semconv v0.81.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect go.opentelemetry.io/otel v1.18.0 // indirect go.opentelemetry.io/otel/metric v1.18.0 // indirect go.opentelemetry.io/otel/trace v1.18.0 // indirect @@ -313,9 +315,9 @@ require ( golang.org/x/tools v0.11.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect + google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 88ec80bbecaa6..74f9d98ad4fc1 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9 cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= +cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= @@ -60,8 +60,8 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y= -cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod 
h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= @@ -84,7 +84,7 @@ cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U= +cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= @@ -103,8 +103,8 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.32.0 h1:JOEkgEYBuUTHSyHS4TcqOFuWr+vD6qO/imsFqShUCp4= -cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= @@ -1423,6 +1423,9 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.86.0 h1:g7HlND105lwm7NW8JCxAfbpaFyk1WKcEUUVwchIo9zE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0 h1:nnzuEQYlsRIkMPAw1jEl+8L2Is68QQl58QvY2dHHgDU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0/go.mod h1:prodbjWZpQkRcd45W2wkRaryv6JomuuWZUmM6mDj27k= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -1785,12 +1788,14 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= +go.opentelemetry.io/collector/featuregate v1.0.0-rcv0015 h1:Wv8JFRUD01MwWkhZwF85to5oukHDFPRjnt88ArDFqco= +go.opentelemetry.io/collector/featuregate v1.0.0-rcv0015/go.mod h1:fLmJMf1AoHttkF8p5oJAc4o5ZpHu8yO5XYJ7gbLCLzo= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 h1:8PzrQFk3oKiT1Sd5EmNEcagdMyt1KcBy5/OyF5He5gY= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0015/go.mod h1:I1PqyHJlsXjANC73tp43nDId7/jiv82NoZZ6uS0xdwM= go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs= go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI= go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ= @@ -2465,12 +2470,12 @@ google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+S google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= -google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= -google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM= -google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE= -google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2513,8 +2518,8 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/integration/client/client.go b/integration/client/client.go index 8445f271e2eee..8e13ed8ef1364 100644 --- a/integration/client/client.go +++ b/integration/client/client.go @@ -15,6 +15,9 @@ import ( "github.com/grafana/dskit/user" "github.com/prometheus/prometheus/model/labels" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" ) const requestTimeout = 30 * time.Second @@ -85,22 +88,7 @@ func New(instanceID, token, baseURL string, opts ...Option) *Client { } } -// PushLogLine creates a new logline with the current time as timestamp -func (c *Client) PushLogLine(line string, extraLabels ...map[string]string) error { - return c.pushLogLine(line, c.Now, nil, extraLabels...) -} - -func (c *Client) PushLogLineWithStructuredMetadata(line string, structuredMetadata map[string]string, extraLabels ...map[string]string) error { - return c.PushLogLineWithTimestampAndStructuredMetadata(line, c.Now, structuredMetadata, extraLabels...) -} - -// PushLogLineWithTimestamp creates a new logline at the given timestamp -// The timestamp has to be a Unix timestamp (epoch seconds) -func (c *Client) PushLogLineWithTimestamp(line string, timestamp time.Time, extraLabels ...map[string]string) error { - return c.pushLogLine(line, timestamp, nil, extraLabels...) -} - -func (c *Client) PushLogLineWithTimestampAndStructuredMetadata(line string, timestamp time.Time, structuredMetadata map[string]string, extraLabelList ...map[string]string) error { +func (c *Client) PushLogLine(line string, timestamp time.Time, structuredMetadata map[string]string, extraLabelList ...map[string]string) error { // If the structuredMetadata map is empty, labels.FromMap will allocate some empty slices. // Since this code is executed for every log line we receive, as an optimization // to avoid those allocations we'll call labels.FromMap only if the map is not empty. 
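+
+// A hedged sketch of the new call shapes after this consolidation. The method
+// signatures are taken from this diff; the tenant ID, URL, timestamps, and
+// label values are illustrative:
+//
+//	cli := client.New("tenant-a", "", "http://localhost:3100")
+//	now := time.Now()
+//	// no structured metadata: pass nil for the third argument
+//	_ = cli.PushLogLine("lineA", now, nil, map[string]string{"job": "fake"})
+//	// with structured metadata attached to the entry
+//	_ = cli.PushLogLine("lineB", now, map[string]string{"traceID": "123"}, map[string]string{"job": "fake"})
+//	// OTLP entry point added by this PR
+//	_ = cli.PushOTLPLogLine("lineC", now, map[string]any{"user_id": "7"})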
@@ -111,6 +99,10 @@ func (c *Client) PushLogLineWithTimestampAndStructuredMetadata(line string, time
 	return c.pushLogLine(line, timestamp, metadata, extraLabelList...)
 }
 
+func (c *Client) PushOTLPLogLine(line string, timestamp time.Time, logAttributes map[string]any) error {
+	return c.pushOTLPLogLine(line, timestamp, logAttributes)
+}
+
 func formatTS(ts time.Time) string {
 	return strconv.FormatInt(ts.UnixNano(), 10)
 }
@@ -171,10 +163,58 @@
 	buf, err := io.ReadAll(res.Body)
 	if err != nil {
-		return fmt.Errorf("reading request failed with status code %v: %w", res.StatusCode, err)
+		return fmt.Errorf("reading response failed with status code %v: %v", res.StatusCode, err)
+	}
+
+	return fmt.Errorf("request failed with status code %v: %s", res.StatusCode, buf)
+}
+
+// pushOTLPLogLine pushes a single log line to Loki's OTLP logs endpoint
+func (c *Client) pushOTLPLogLine(line string, timestamp time.Time, logAttributes map[string]any) error {
+	apiEndpoint := fmt.Sprintf("%s/otlp/v1/logs", c.baseURL)
+
+	logs := plog.NewLogs()
+
+	logs.ResourceLogs().AppendEmpty().Resource().Attributes().PutStr("service.name", "varlog")
+	logRecord := logs.ResourceLogs().At(0).ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
+	logRecord.SetTimestamp(pcommon.Timestamp(timestamp.UnixNano()))
+	logRecord.Body().SetStr(line)
+	if len(logAttributes) > 0 {
+		if err := logRecord.Attributes().FromRaw(logAttributes); err != nil {
+			return err
+		}
+	}
+
+	ereq := plogotlp.NewExportRequestFromLogs(logs)
+
+	data, err := ereq.MarshalJSON()
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequest("POST", apiEndpoint, bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("X-Scope-OrgID", c.instanceID)
+
+	// Execute HTTP request
+	res, err := c.httpClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	if res.StatusCode/100 == 2 {
+		defer res.Body.Close()
+		return nil
+	}
+
+	buf, err := io.ReadAll(res.Body)
+	if err != nil {
+		return fmt.Errorf("reading response failed with status code %v: %v", res.StatusCode, err)
+	}
 
-	return fmt.Errorf("request failed with status code %v: %w", res.StatusCode, errors.New(string(buf)))
+	return fmt.Errorf("request failed with status code %v: %s", res.StatusCode, buf)
 }
 
 func (c *Client) Get(path string) (*http.Response, error) {
diff --git a/integration/cluster/cluster.go b/integration/cluster/cluster.go
index 9d7e941dc0732..a52859cee4fde 100644
--- a/integration/cluster/cluster.go
+++ b/integration/cluster/cluster.go
@@ -24,8 +24,8 @@
 	"gopkg.in/yaml.v2"
 
 	"github.com/grafana/loki/integration/util"
-
 	"github.com/grafana/loki/pkg/loki"
+	"github.com/grafana/loki/pkg/storage"
 	"github.com/grafana/loki/pkg/storage/config"
 	"github.com/grafana/loki/pkg/util/cfg"
 	util_log "github.com/grafana/loki/pkg/util/log"
@@ -209,6 +209,8 @@
 }
 
 func (c *Cluster) Cleanup() error {
+	// cleanup singleton boltdb shipper client instances
+	storage.ResetBoltDBIndexClientsWithShipper()
 	return c.stop(true)
 }
 
diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go
index 05dfb021ee4f5..e1759783967fc 100644
--- a/integration/loki_micro_services_delete_test.go
+++ b/integration/loki_micro_services_delete_test.go
@@ -26,13 +26,11 @@ type pushRequest struct {
 }
 
 func TestMicroServicesDeleteRequest(t *testing.T) {
-
storage.ResetBoltDBIndexClientsWithShipper() clu := cluster.New(nil, cluster.SchemaWithBoltDBAndBoltDB, func(c *cluster.Cluster) { c.SetSchemaVer("v13") }) defer func() { assert.NoError(t, clu.Cleanup()) - storage.ResetBoltDBIndexClientsWithShipper() }() // initially, run only compactor, index-gateway and distributor. @@ -236,7 +234,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) { // ingest some log lines for _, pr := range pushRequests { for _, entry := range pr.entries { - require.NoError(t, cliDistributor.PushLogLineWithTimestampAndStructuredMetadata( + require.NoError(t, cliDistributor.PushLogLine( entry.Line, entry.Timestamp, logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata).Map(), diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go index db255adabebba..d2a285d154572 100644 --- a/integration/loki_micro_services_test.go +++ b/integration/loki_micro_services_test.go @@ -96,11 +96,11 @@ func TestMicroServicesIngestQuery(t *testing.T) { t.Run("ingest-logs", func(t *testing.T) { // ingest some log lines - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineA", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineB", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineA", now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineB", now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLine("lineC", map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLine("lineD", map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineC", now, nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineD", now, nil, map[string]string{"job": "fake"})) }) t.Run("query", func(t *testing.T) { @@ -211,8 +211,8 @@ func TestMicroServicesIngestQueryWithSchemaChange(t *testing.T) { cliQueryFrontend.Now = now t.Run("ingest-logs", func(t *testing.T) { - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineA", time.Now().Add(-72*time.Hour), map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineB", time.Now().Add(-48*time.Hour), map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineA", time.Now().Add(-72*time.Hour), nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineB", time.Now().Add(-48*time.Hour), nil, map[string]string{"job": "fake"})) }) t.Run("query-lookback-default", func(t *testing.T) { @@ -288,8 +288,8 @@ func TestMicroServicesIngestQueryWithSchemaChange(t *testing.T) { t.Run("ingest-logs-new-period", func(t *testing.T) { // ingest logs to the new period - require.NoError(t, cliDistributor.PushLogLine("lineC", map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLine("lineD", map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineC", now, nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineD", now, nil, map[string]string{"job": "fake"})) }) t.Run("query-both-periods-with-default-lookback", func(t *testing.T) { @@ -340,11 +340,9 @@ func TestMicroServicesIngestQueryOverMultipleBucketSingleProvider(t *testing.T) "boltdb-and-tsdb": cluster.SchemaWithBoltDBAndTSDB, } { 
t.Run(name, func(t *testing.T) { - storage.ResetBoltDBIndexClientsWithShipper() clu := cluster.New(nil, opt) defer func() { - storage.ResetBoltDBIndexClientsWithShipper() assert.NoError(t, clu.Cleanup()) }() @@ -406,12 +404,12 @@ func TestMicroServicesIngestQueryOverMultipleBucketSingleProvider(t *testing.T) cliQueryFrontend.Now = now t.Run("ingest-logs", func(t *testing.T) { - require.NoError(t, cliDistributor.PushLogLineWithTimestampAndStructuredMetadata("lineA", time.Now().Add(-48*time.Hour), map[string]string{"traceID": "123"}, map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLineWithTimestampAndStructuredMetadata("lineB", time.Now().Add(-36*time.Hour), map[string]string{"traceID": "456"}, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineA", time.Now().Add(-48*time.Hour), map[string]string{"traceID": "123"}, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineB", time.Now().Add(-36*time.Hour), map[string]string{"traceID": "456"}, map[string]string{"job": "fake"})) // ingest logs to the current period - require.NoError(t, cliDistributor.PushLogLineWithStructuredMetadata("lineC", map[string]string{"traceID": "789"}, map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLineWithStructuredMetadata("lineD", map[string]string{"traceID": "123"}, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineC", now, map[string]string{"traceID": "789"}, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineD", now, map[string]string{"traceID": "123"}, map[string]string{"job": "fake"})) }) @@ -556,11 +554,11 @@ func TestSchedulerRing(t *testing.T) { t.Run("ingest-logs", func(t *testing.T) { // ingest some log lines - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineA", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineB", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineA", now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineB", now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLine("lineC", map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLine("lineD", map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineC", now, nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineD", now, nil, map[string]string{"job": "fake"})) }) t.Run("query", func(t *testing.T) { @@ -663,8 +661,8 @@ func TestQueryTSDB_WithCachedPostings(t *testing.T) { }) t.Run("ingest-logs", func(t *testing.T) { - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineA", time.Now().Add(-72*time.Hour), map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLineWithTimestamp("lineB", time.Now().Add(-48*time.Hour), map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineA", time.Now().Add(-72*time.Hour), nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineB", time.Now().Add(-48*time.Hour), nil, map[string]string{"job": "fake"})) }) // restart ingester which should flush the chunks and index @@ -695,8 +693,8 @@ func TestQueryTSDB_WithCachedPostings(t *testing.T) { }) // 
ingest logs with ts=now. - require.NoError(t, cliDistributor.PushLogLine("lineC", map[string]string{"job": "fake"})) - require.NoError(t, cliDistributor.PushLogLine("lineD", map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineC", now, nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineD", now, nil, map[string]string{"job": "fake"})) // default length is 7 days. resp, err := cliQueryFrontend.RunRangeQuery(context.Background(), `{job="fake"}`) @@ -714,6 +712,133 @@ func TestQueryTSDB_WithCachedPostings(t *testing.T) { } +func TestOTLPLogsIngestQuery(t *testing.T) { + clu := cluster.New(nil, func(c *cluster.Cluster) { + c.SetSchemaVer("v13") + }) + defer func() { + assert.NoError(t, clu.Cleanup()) + }() + + // run initially the compactor, indexgateway, and distributor. + var ( + tCompactor = clu.AddComponent( + "compactor", + "-target=compactor", + "-boltdb.shipper.compactor.compaction-interval=1s", + "-boltdb.shipper.compactor.retention-delete-delay=1s", + // By default, a minute is added to the delete request start time. This compensates for that. + "-boltdb.shipper.compactor.delete-request-cancel-period=-60s", + "-compactor.deletion-mode=filter-and-delete", + ) + tIndexGateway = clu.AddComponent( + "index-gateway", + "-target=index-gateway", + ) + tDistributor = clu.AddComponent( + "distributor", + "-target=distributor", + ) + ) + require.NoError(t, clu.Run()) + + // then, run only the ingester and query scheduler. + var ( + tIngester = clu.AddComponent( + "ingester", + "-target=ingester", + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + ) + tQueryScheduler = clu.AddComponent( + "query-scheduler", + "-target=query-scheduler", + "-query-scheduler.use-scheduler-ring=false", + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + ) + ) + require.NoError(t, clu.Run()) + + // finally, run the query-frontend and querier. 
+ var ( + tQueryFrontend = clu.AddComponent( + "query-frontend", + "-target=query-frontend", + "-frontend.scheduler-address="+tQueryScheduler.GRPCURL(), + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + "-common.compactor-address="+tCompactor.HTTPURL(), + "-querier.per-request-limits-enabled=true", + "-frontend.required-query-response-format=protobuf", + ) + _ = clu.AddComponent( + "querier", + "-target=querier", + "-querier.scheduler-address="+tQueryScheduler.GRPCURL(), + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + "-common.compactor-address="+tCompactor.HTTPURL(), + ) + ) + require.NoError(t, clu.Run()) + + tenantID := randStringRunes() + + now := time.Now() + cliDistributor := client.New(tenantID, "", tDistributor.HTTPURL()) + cliDistributor.Now = now + cliIngester := client.New(tenantID, "", tIngester.HTTPURL()) + cliIngester.Now = now + cliQueryFrontend := client.New(tenantID, "", tQueryFrontend.HTTPURL()) + cliQueryFrontend.Now = now + + t.Run("ingest-logs", func(t *testing.T) { + // ingest some log lines + require.NoError(t, cliDistributor.PushOTLPLogLine("lineA", now.Add(-45*time.Minute), map[string]any{"trace_id": 1, "user_id": "2"})) + require.NoError(t, cliDistributor.PushOTLPLogLine("lineB", now.Add(-45*time.Minute), nil)) + + require.NoError(t, cliDistributor.PushOTLPLogLine("lineC", now, map[string]any{"order.ids": []any{5, 6}})) + require.NoError(t, cliDistributor.PushOTLPLogLine("lineD", now, nil)) + }) + + t.Run("query", func(t *testing.T) { + resp, err := cliQueryFrontend.RunRangeQuery(context.Background(), `{service_name="varlog"}`) + require.NoError(t, err) + assert.Equal(t, "streams", resp.Data.ResultType) + + numLinesReceived := 0 + for i, stream := range resp.Data.Stream { + switch i { + case 0: + require.Len(t, stream.Values, 2) + require.Equal(t, "lineD", stream.Values[0][1]) + require.Equal(t, "lineB", stream.Values[1][1]) + require.Equal(t, map[string]string{ + "service_name": "varlog", + }, stream.Stream) + numLinesReceived += 2 + case 1: + require.Len(t, stream.Values, 1) + require.Equal(t, "lineA", stream.Values[0][1]) + require.Equal(t, map[string]string{ + "service_name": "varlog", + "trace_id": "1", + "user_id": "2", + }, stream.Stream) + numLinesReceived++ + case 2: + require.Len(t, stream.Values, 1) + require.Equal(t, "lineC", stream.Values[0][1]) + require.Equal(t, map[string]string{ + "service_name": "varlog", + "order_ids": "[5,6]", + }, stream.Stream) + numLinesReceived++ + default: + t.Errorf("unexpected case %d", i) + } + } + require.Equal(t, 4, numLinesReceived) + }) +} + func getValueFromMF(mf *dto.MetricFamily, lbs []*dto.LabelPair) float64 { for _, m := range mf.Metric { if !assert.ObjectsAreEqualValues(lbs, m.GetLabel()) { diff --git a/integration/loki_rule_eval_test.go b/integration/loki_rule_eval_test.go index bb4d76431ed03..41414d4aef67d 100644 --- a/integration/loki_rule_eval_test.go +++ b/integration/loki_rule_eval_test.go @@ -56,9 +56,9 @@ func testRuleEval(t *testing.T, mode string) { cliWrite.Now = now // 1. 
Ingest some logs - require.NoError(t, cliWrite.PushLogLineWithTimestamp("HEAD /", now, map[string]string{"method": "HEAD", "job": job})) - require.NoError(t, cliWrite.PushLogLineWithTimestamp("GET /", now, map[string]string{"method": "GET", "job": job})) - require.NoError(t, cliWrite.PushLogLineWithTimestamp("GET /", now.Add(time.Second), map[string]string{"method": "GET", "job": job})) + require.NoError(t, cliWrite.PushLogLine("HEAD /", now, nil, map[string]string{"method": "HEAD", "job": job})) + require.NoError(t, cliWrite.PushLogLine("GET /", now, nil, map[string]string{"method": "GET", "job": job})) + require.NoError(t, cliWrite.PushLogLine("GET /", now.Add(time.Second), nil, map[string]string{"method": "GET", "job": job})) // advance time to after the last ingested log line so queries don't return empty results now = now.Add(time.Second * 2) diff --git a/integration/loki_simple_scalable_test.go b/integration/loki_simple_scalable_test.go index 56c001bb2afbd..2de17e3420b85 100644 --- a/integration/loki_simple_scalable_test.go +++ b/integration/loki_simple_scalable_test.go @@ -51,11 +51,11 @@ func TestSimpleScalable_IngestQuery(t *testing.T) { t.Run("ingest logs", func(t *testing.T) { // ingest some log lines - require.NoError(t, cliWrite.PushLogLineWithTimestamp("lineA", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) - require.NoError(t, cliWrite.PushLogLineWithTimestamp("lineB", now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + require.NoError(t, cliWrite.PushLogLine("lineA", now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) + require.NoError(t, cliWrite.PushLogLine("lineB", now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) - require.NoError(t, cliWrite.PushLogLine("lineC", map[string]string{"job": "fake"})) - require.NoError(t, cliWrite.PushLogLine("lineD", map[string]string{"job": "fake"})) + require.NoError(t, cliWrite.PushLogLine("lineC", now, nil, map[string]string{"job": "fake"})) + require.NoError(t, cliWrite.PushLogLine("lineD", now, nil, map[string]string{"job": "fake"})) }) t.Run("query", func(t *testing.T) { diff --git a/integration/loki_single_binary_test.go b/integration/loki_single_binary_test.go index b2e2a2ac05630..16bb5b36944d4 100644 --- a/integration/loki_single_binary_test.go +++ b/integration/loki_single_binary_test.go @@ -29,11 +29,12 @@ func TestSingleBinaryIngestQuery(t *testing.T) { tenantID := randStringRunes() cli := client.New(tenantID, "", tAll.HTTPURL()) + now := time.Now() t.Run("ingest-logs-store", func(t *testing.T) { // ingest some log lines - require.NoError(t, cli.PushLogLineWithTimestamp("lineA", cli.Now.Add(-45*time.Minute), map[string]string{"job": "fake"})) - require.NoError(t, cli.PushLogLineWithTimestamp("lineB", cli.Now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + require.NoError(t, cli.PushLogLine("lineA", cli.Now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) + require.NoError(t, cli.PushLogLine("lineB", cli.Now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) // TODO: Flushing is currently causing a panic, as the boltdb shipper is shared using a global variable in: // https://github.com/grafana/loki/blob/66a4692423582ed17cce9bd86b69d55663dc7721/pkg/storage/factory.go#L32-L35 @@ -42,8 +43,8 @@ func TestSingleBinaryIngestQuery(t *testing.T) { t.Run("ingest-logs-ingester", func(t *testing.T) { // ingest some log lines - require.NoError(t, cli.PushLogLine("lineC", map[string]string{"job": "fake"})) - require.NoError(t, 
cli.PushLogLine("lineD", map[string]string{"job": "fake"})) + require.NoError(t, cli.PushLogLine("lineC", now, nil, map[string]string{"job": "fake"})) + require.NoError(t, cli.PushLogLine("lineD", now, nil, map[string]string{"job": "fake"})) }) t.Run("query", func(t *testing.T) { diff --git a/integration/multi_tenant_queries_test.go b/integration/multi_tenant_queries_test.go index e044d04d6d6ee..468cc98de18a4 100644 --- a/integration/multi_tenant_queries_test.go +++ b/integration/multi_tenant_queries_test.go @@ -32,8 +32,8 @@ func TestMultiTenantQuery(t *testing.T) { cliMultitenant := client.New("org1|org2", "", tAll.HTTPURL()) // ingest log lines for tenant 1 and tenant 2. - require.NoError(t, cliTenant1.PushLogLineWithTimestamp("lineA", cliTenant1.Now.Add(-45*time.Minute), map[string]string{"job": "fake1"})) - require.NoError(t, cliTenant2.PushLogLineWithTimestamp("lineB", cliTenant2.Now.Add(-45*time.Minute), map[string]string{"job": "fake2"})) + require.NoError(t, cliTenant1.PushLogLine("lineA", cliTenant1.Now.Add(-45*time.Minute), nil, map[string]string{"job": "fake1"})) + require.NoError(t, cliTenant2.PushLogLine("lineB", cliTenant2.Now.Add(-45*time.Minute), nil, map[string]string{"job": "fake2"})) // check that tenant1 only have access to log line A. matchLines(t, cliTenant1, `{job="fake2"}`, []string{}) diff --git a/integration/per_request_limits_test.go b/integration/per_request_limits_test.go index 3df42d70cd592..85642e0439e61 100644 --- a/integration/per_request_limits_test.go +++ b/integration/per_request_limits_test.go @@ -35,7 +35,7 @@ func TestPerRequestLimits(t *testing.T) { cliTenant := client.New("org1", "", tAll.HTTPURL(), queryLimitsPolicy) // ingest log lines for tenant 1 and tenant 2. - require.NoError(t, cliTenant.PushLogLineWithTimestamp("lineA", cliTenant.Now.Add(-45*time.Minute), map[string]string{"job": "fake"})) + require.NoError(t, cliTenant.PushLogLine("lineA", cliTenant.Now.Add(-45*time.Minute), nil, map[string]string{"job": "fake"})) // check that per-rquest-limits are enforced _, err := cliTenant.RunRangeQuery(context.Background(), `{job="fake"}`) diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go index 29f0f80e394d9..67db8e5c5ba7b 100644 --- a/pkg/distributor/http.go +++ b/pkg/distributor/http.go @@ -19,6 +19,14 @@ import ( // PushHandler reads a snappy-compressed proto from the HTTP body. 
func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { + d.pushHandler(w, r, push.ParseLokiRequest) +} + +func (d *Distributor) OTLPPushHandler(w http.ResponseWriter, r *http.Request) { + d.pushHandler(w, r, push.ParseOTLPRequest) +} + +func (d *Distributor) pushHandler(w http.ResponseWriter, r *http.Request, pushRequestParser push.RequestParser) { logger := util_log.WithContext(r.Context(), util_log.Logger) tenantID, err := tenant.TenantID(r.Context()) if err != nil { @@ -26,7 +34,7 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention) + req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention, pushRequestParser) if err != nil { if d.tenantConfigs.LogPushRequest(tenantID) { level.Debug(logger).Log( diff --git a/pkg/loghttp/push/otlp.go b/pkg/loghttp/push/otlp.go new file mode 100644 index 0000000000000..737e9b78ae72a --- /dev/null +++ b/pkg/loghttp/push/otlp.go @@ -0,0 +1,370 @@ +package push + +import ( + "compress/gzip" + "encoding/hex" + "fmt" + "io" + "net/http" + "sort" + "time" + + prometheustranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/push" + loki_util "github.com/grafana/loki/pkg/util" +) + +const ( + pbContentType = "application/x-protobuf" + gzipContentEncoding = "gzip" +) + +var blessedAttributes = []string{ + "service.name", + "service.namespace", + "service.instance.id", + "deployment.environment", + "cloud.region", + "cloud.availability_zone", + "k8s.cluster.name", + "k8s.namespace.name", + "k8s.pod.name", + "k8s.container.name", + "container.name", + "k8s.replicaset.name", + "k8s.deployment.name", + "k8s.statefulset.name", + "k8s.daemonset.name", + "k8s.cronjob.name", + "k8s.job.name", +} + +var blessedAttributesNormalized = make([]string, len(blessedAttributes)) + +func init() { + for i := range blessedAttributes { + blessedAttributesNormalized[i] = prometheustranslator.NormalizeLabel(blessedAttributes[i]) + } +} + +func newPushStats() *Stats { + return &Stats{ + logLinesBytes: map[time.Duration]int64{}, + structuredMetadataBytes: map[time.Duration]int64{}, + } +} + +func ParseOTLPRequest(userID string, r *http.Request, tenantsRetention TenantsRetention) (*logproto.PushRequest, *Stats, error) { + stats := newPushStats() + otlpLogs, err := extractLogs(r, stats) + if err != nil { + return nil, nil, err + } + + req := otlpToLokiPushRequest(otlpLogs, userID, tenantsRetention, stats) + return req, stats, nil +} + +func extractLogs(r *http.Request, pushStats *Stats) (plog.Logs, error) { + pushStats.contentEncoding = r.Header.Get(contentEnc) + // bodySize should always reflect the compressed size of the request body + bodySize := loki_util.NewSizeReader(r.Body) + var body io.Reader = bodySize + if pushStats.contentEncoding == gzipContentEncoding { + r, err := gzip.NewReader(bodySize) + if err != nil { + return plog.NewLogs(), err + } + body = r + defer func(reader *gzip.Reader) { + _ = reader.Close() + }(r) + } + buf, err := io.ReadAll(body) + if err != nil 
{ + return plog.NewLogs(), err + } + + pushStats.bodySize = bodySize.Size() + + req := plogotlp.NewExportRequest() + + pushStats.contentType = r.Header.Get(contentType) + switch pushStats.contentType { + case pbContentType: + err := req.UnmarshalProto(buf) + if err != nil { + return plog.NewLogs(), err + } + case applicationJSON: + err := req.UnmarshalJSON(buf) + if err != nil { + return plog.NewLogs(), err + } + default: + return plog.NewLogs(), + errors.Errorf( + "content type: %s is not supported", + r.Header.Get("Content-Type"), + ) + } + + return req.Logs(), nil +} + +func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention TenantsRetention, stats *Stats) *logproto.PushRequest { + if ld.LogRecordCount() == 0 { + return &logproto.PushRequest{} + } + + rls := ld.ResourceLogs() + pushRequestsByStream := make(map[string]logproto.Stream, rls.Len()) + + for i := 0; i < rls.Len(); i++ { + sls := rls.At(i).ScopeLogs() + res := rls.At(i).Resource() + + flattenedResourceAttributes := labels.NewBuilder(logproto.FromLabelAdaptersToLabels(attributesToLabels(res.Attributes(), ""))) + // service.name is a required Resource Attribute. If it is not present, we will set it to "unknown_service". + if flattenedResourceAttributes.Get("service_name") == "" { + flattenedResourceAttributes = flattenedResourceAttributes.Set("service_name", "unknown_service") + } + + if dac := res.DroppedAttributesCount(); dac != 0 { + flattenedResourceAttributes = flattenedResourceAttributes.Set("resource_dropped_attributes_count", fmt.Sprintf("%d", dac)) + } + + // copy blessed attributes to stream labels + streamLabels := make(model.LabelSet, len(blessedAttributesNormalized)) + for _, ba := range blessedAttributesNormalized { + v := flattenedResourceAttributes.Get(ba) + if v == "" { + continue + } + streamLabels[model.LabelName(ba)] = model.LabelValue(v) + + // remove the blessed attributes copied to stream labels + flattenedResourceAttributes.Del(ba) + } + + if err := streamLabels.Validate(); err != nil { + stats.errs = append(stats.errs, fmt.Errorf("invalid labels: %w", err)) + continue + } + labelsStr := streamLabels.String() + + // convert the remaining resource attributes to structured metadata + resourceAttributesAsStructuredMetadata := logproto.FromLabelsToLabelAdapters(flattenedResourceAttributes.Labels()) + + lbs := modelLabelsSetToLabelsList(streamLabels) + if _, ok := pushRequestsByStream[labelsStr]; !ok { + pushRequestsByStream[labelsStr] = logproto.Stream{ + Labels: labelsStr, + } + stats.streamLabelsSize += int64(labelsSize(logproto.FromLabelsToLabelAdapters(lbs))) + } + + resourceAttributesAsStructuredMetadataSize := labelsSize(resourceAttributesAsStructuredMetadata) + stats.structuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(resourceAttributesAsStructuredMetadataSize) + + for j := 0; j < sls.Len(); j++ { + scope := sls.At(j).Scope() + logs := sls.At(j).LogRecords() + + // it would be rare to have multiple scopes so if the entries slice is empty, pre-allocate it for the number of log entries + if cap(pushRequestsByStream[labelsStr].Entries) == 0 { + stream := pushRequestsByStream[labelsStr] + stream.Entries = make([]push.Entry, 0, logs.Len()) + pushRequestsByStream[labelsStr] = stream + } + + // use fields and attributes from scope as structured metadata + scopeAttributesAsStructuredMetadata := attributesToLabels(scope.Attributes(), "") + + if scopeName := scope.Name(); scopeName != "" { + scopeAttributesAsStructuredMetadata = 
append(scopeAttributesAsStructuredMetadata, push.LabelAdapter{ + Name: "scope_name", + Value: scopeName, + }) + } + if scopeVersion := scope.Version(); scopeVersion != "" { + scopeAttributesAsStructuredMetadata = append(scopeAttributesAsStructuredMetadata, push.LabelAdapter{ + Name: "scope_version", + Value: scopeVersion, + }) + } + if scopeDroppedAttributesCount := scope.DroppedAttributesCount(); scopeDroppedAttributesCount != 0 { + scopeAttributesAsStructuredMetadata = append(scopeAttributesAsStructuredMetadata, push.LabelAdapter{ + Name: "scope_dropped_attributes_count", + Value: fmt.Sprintf("%d", scopeDroppedAttributesCount), + }) + } + + scopeAttributesAsStructuredMetadataSize := labelsSize(scopeAttributesAsStructuredMetadata) + stats.structuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(scopeAttributesAsStructuredMetadataSize) + for k := 0; k < logs.Len(); k++ { + log := logs.At(k) + + entry := otlpLogToPushEntry(log) + + // if entry.StructuredMetadata doesn't have capacity to add resource and scope attributes, make a new slice with enough capacity + attributesAsStructuredMetadataLen := len(resourceAttributesAsStructuredMetadata) + len(scopeAttributesAsStructuredMetadata) + if cap(entry.StructuredMetadata) < len(entry.StructuredMetadata)+attributesAsStructuredMetadataLen { + structuredMetadata := make(push.LabelsAdapter, 0, len(entry.StructuredMetadata)+len(scopeAttributesAsStructuredMetadata)+len(resourceAttributesAsStructuredMetadata)) + structuredMetadata = append(structuredMetadata, entry.StructuredMetadata...) + entry.StructuredMetadata = structuredMetadata + } + + entry.StructuredMetadata = append(entry.StructuredMetadata, resourceAttributesAsStructuredMetadata...) + entry.StructuredMetadata = append(entry.StructuredMetadata, scopeAttributesAsStructuredMetadata...) + stream := pushRequestsByStream[labelsStr] + stream.Entries = append(stream.Entries, entry) + pushRequestsByStream[labelsStr] = stream + + stats.structuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(labelsSize(entry.StructuredMetadata) - resourceAttributesAsStructuredMetadataSize - scopeAttributesAsStructuredMetadataSize) + stats.logLinesBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(len(entry.Line)) + stats.numLines++ + if entry.Timestamp.After(stats.mostRecentEntryTimestamp) { + stats.mostRecentEntryTimestamp = entry.Timestamp + } + } + } + } + + pr := &push.PushRequest{ + Streams: make([]push.Stream, 0, len(pushRequestsByStream)), + } + + for _, stream := range pushRequestsByStream { + pr.Streams = append(pr.Streams, stream) + } + + return pr +} + +// otlpLogToPushEntry converts an OTLP log record to a Loki push.Entry. 
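+//
+// The mapping implemented below is, in outline:
+//
+//	log.Body().AsString()                      -> entry line
+//	log.Timestamp(), else ObservedTimestamp(),
+//	else time.Now()                            -> entry timestamp
+//	attributes, severity, flags, dropped
+//	counts, trace/span IDs                     -> structured metadata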
+func otlpLogToPushEntry(log plog.LogRecord) push.Entry { + // copy log attributes and all the fields from log(except log.Body) to structured metadata + structuredMetadata := attributesToLabels(log.Attributes(), "") + + // if log.Timestamp() is 0, we would have already stored log.ObservedTimestamp as log timestamp so no need to store again in structured metadata + if log.Timestamp() != 0 && log.ObservedTimestamp() != 0 { + structuredMetadata = append(structuredMetadata, push.LabelAdapter{ + Name: "observed_timestamp", + Value: fmt.Sprintf("%d", log.ObservedTimestamp().AsTime().UnixNano()), + }) + } + + if severityNum := log.SeverityNumber(); severityNum != plog.SeverityNumberUnspecified { + structuredMetadata = append(structuredMetadata, push.LabelAdapter{ + Name: "severity_number", + Value: fmt.Sprintf("%d", severityNum), + }) + } + if severityText := log.SeverityText(); severityText != "" { + structuredMetadata = append(structuredMetadata, push.LabelAdapter{ + Name: "severity_text", + Value: severityText, + }) + } + + if droppedAttributesCount := log.DroppedAttributesCount(); droppedAttributesCount != 0 { + structuredMetadata = append(structuredMetadata, push.LabelAdapter{ + Name: "dropped_attributes_count", + Value: fmt.Sprintf("%d", droppedAttributesCount), + }) + } + if logRecordFlags := log.Flags(); logRecordFlags != 0 { + structuredMetadata = append(structuredMetadata, push.LabelAdapter{ + Name: "flags", + Value: fmt.Sprintf("%d", logRecordFlags), + }) + } + + if traceID := log.TraceID(); !traceID.IsEmpty() { + structuredMetadata = append(structuredMetadata, push.LabelAdapter{ + Name: "trace_id", + Value: hex.EncodeToString(traceID[:]), + }) + } + if spanID := log.SpanID(); !spanID.IsEmpty() { + structuredMetadata = append(structuredMetadata, push.LabelAdapter{ + Name: "span_id", + Value: hex.EncodeToString(spanID[:]), + }) + } + + return push.Entry{ + Timestamp: timestampFromLogRecord(log), + Line: log.Body().AsString(), + StructuredMetadata: structuredMetadata, + } +} + +func attributesToLabels(attrs pcommon.Map, prefix string) push.LabelsAdapter { + labelsAdapter := make(push.LabelsAdapter, 0, attrs.Len()) + if attrs.Len() == 0 { + return labelsAdapter + } + + attrs.Range(func(k string, v pcommon.Value) bool { + keyWithPrefix := k + if prefix != "" { + keyWithPrefix = prefix + "_" + k + } + keyWithPrefix = prometheustranslator.NormalizeLabel(keyWithPrefix) + + typ := v.Type() + if typ == pcommon.ValueTypeMap { + labelsAdapter = append(labelsAdapter, attributesToLabels(v.Map(), keyWithPrefix)...) 
+ } else { + labelsAdapter = append(labelsAdapter, push.LabelAdapter{Name: keyWithPrefix, Value: v.AsString()}) + } + + return true + }) + + return labelsAdapter +} + +func timestampFromLogRecord(lr plog.LogRecord) time.Time { + if lr.Timestamp() != 0 { + return time.Unix(0, int64(lr.Timestamp())) + } + + if lr.ObservedTimestamp() != 0 { + return time.Unix(0, int64(lr.ObservedTimestamp())) + } + + return time.Unix(0, time.Now().UnixNano()) +} + +func labelsSize(lbls push.LabelsAdapter) int { + size := 0 + for _, lbl := range lbls { + size += len(lbl.Name) + len(lbl.Value) + } + + return size +} + +func modelLabelsSetToLabelsList(m model.LabelSet) labels.Labels { + l := make(labels.Labels, 0, len(m)) + for lName, lValue := range m { + l = append(l, labels.Label{ + Name: string(lName), + Value: string(lValue), + }) + } + + sort.Sort(l) + return l +} diff --git a/pkg/loghttp/push/otlp_test.go b/pkg/loghttp/push/otlp_test.go new file mode 100644 index 0000000000000..8018fbd5a1ae6 --- /dev/null +++ b/pkg/loghttp/push/otlp_test.go @@ -0,0 +1,444 @@ +package push + +import ( + "encoding/base64" + "fmt" + "testing" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/push" +) + +func TestOTLPToLokiPushRequest(t *testing.T) { + now := time.Unix(0, time.Now().UnixNano()) + + for _, tc := range []struct { + name string + generateLogs func() plog.Logs + expectedPushRequest logproto.PushRequest + expectedStats Stats + }{ + { + name: "no logs", + generateLogs: func() plog.Logs { + return plog.NewLogs() + }, + expectedPushRequest: logproto.PushRequest{}, + expectedStats: *newPushStats(), + }, + { + name: "resource with no logs", + generateLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().Resource().Attributes().PutStr("service.name", "service-1") + return ld + }, + expectedPushRequest: logproto.PushRequest{}, + expectedStats: *newPushStats(), + }, + { + name: "resource with a log entry", + generateLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().Resource().Attributes().PutStr("service.name", "service-1") + ld.ResourceLogs().At(0).ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("test body") + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).SetTimestamp(pcommon.Timestamp(now.UnixNano())) + return ld + }, + expectedPushRequest: logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: `{service_name="service-1"}`, + Entries: []logproto.Entry{ + { + Timestamp: now, + Line: "test body", + StructuredMetadata: push.LabelsAdapter{}, + }, + }, + }, + }, + }, + expectedStats: Stats{ + numLines: 1, + logLinesBytes: map[time.Duration]int64{ + time.Hour: 9, + }, + structuredMetadataBytes: map[time.Duration]int64{ + time.Hour: 0, + }, + streamLabelsSize: 21, + mostRecentEntryTimestamp: now, + }, + }, + { + name: "resource attributes and scope attributes stored as structured metadata", + generateLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty() + ld.ResourceLogs().At(0).Resource().Attributes().PutStr("service.name", "service-1") + ld.ResourceLogs().At(0).Resource().Attributes().PutStr("service.image", "loki") + ld.ResourceLogs().At(0).ScopeLogs().AppendEmpty() + ld.ResourceLogs().At(0).ScopeLogs().At(0).Scope().SetName("fizz") + 
ld.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutStr("op", "buzz") + for i := 0; i < 2; i++ { + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().AppendEmpty() + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(i).Body().SetStr(fmt.Sprintf("test body - %d", i)) + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(i).SetTimestamp(pcommon.Timestamp(now.UnixNano())) + } + return ld + }, + expectedPushRequest: logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: `{service_name="service-1"}`, + Entries: []logproto.Entry{ + { + Timestamp: now, + Line: "test body - 0", + StructuredMetadata: push.LabelsAdapter{ + { + Name: "service_image", + Value: "loki", + }, + { + Name: "op", + Value: "buzz", + }, + { + Name: "scope_name", + Value: "fizz", + }, + }, + }, + { + Timestamp: now, + Line: "test body - 1", + StructuredMetadata: push.LabelsAdapter{ + { + Name: "service_image", + Value: "loki", + }, + { + Name: "op", + Value: "buzz", + }, + { + Name: "scope_name", + Value: "fizz", + }, + }, + }, + }, + }, + }, + }, + expectedStats: Stats{ + numLines: 2, + logLinesBytes: map[time.Duration]int64{ + time.Hour: 26, + }, + structuredMetadataBytes: map[time.Duration]int64{ + time.Hour: 37, + }, + streamLabelsSize: 21, + mostRecentEntryTimestamp: now, + }, + }, + { + name: "attributes with nested data", + generateLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty() + ld.ResourceLogs().At(0).Resource().Attributes().PutStr("service.name", "service-1") + ld.ResourceLogs().At(0).Resource().Attributes().PutEmptyMap("resource.nested").PutStr("foo", "bar") + ld.ResourceLogs().At(0).ScopeLogs().AppendEmpty() + ld.ResourceLogs().At(0).ScopeLogs().At(0).Scope().SetName("fizz") + ld.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutEmptyMap("scope.nested").PutStr("foo", "bar") + for i := 0; i < 2; i++ { + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().AppendEmpty() + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(i).Body().SetStr(fmt.Sprintf("test body - %d", i)) + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(i).SetTimestamp(pcommon.Timestamp(now.UnixNano())) + ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(i).Attributes().PutEmptyMap("log.nested").PutStr("foo", fmt.Sprintf("bar - %d", i)) + } + return ld + }, + expectedPushRequest: logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: `{service_name="service-1"}`, + Entries: []logproto.Entry{ + { + Timestamp: now, + Line: "test body - 0", + StructuredMetadata: push.LabelsAdapter{ + { + Name: "log_nested_foo", + Value: "bar - 0", + }, + { + Name: "resource_nested_foo", + Value: "bar", + }, + { + Name: "scope_nested_foo", + Value: "bar", + }, + { + Name: "scope_name", + Value: "fizz", + }, + }, + }, + { + Timestamp: now, + Line: "test body - 1", + StructuredMetadata: push.LabelsAdapter{ + { + Name: "log_nested_foo", + Value: "bar - 1", + }, + { + Name: "resource_nested_foo", + Value: "bar", + }, + { + Name: "scope_nested_foo", + Value: "bar", + }, + { + Name: "scope_name", + Value: "fizz", + }, + }, + }, + }, + }, + }, + }, + expectedStats: Stats{ + numLines: 2, + logLinesBytes: map[time.Duration]int64{ + time.Hour: 26, + }, + structuredMetadataBytes: map[time.Duration]int64{ + time.Hour: 97, + }, + streamLabelsSize: 21, + mostRecentEntryTimestamp: now, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + stats := newPushStats() + pushReq := otlpToLokiPushRequest(tc.generateLogs(), "foo", fakeRetention{}, stats) + 
require.Equal(t, tc.expectedPushRequest, *pushReq) + require.Equal(t, tc.expectedStats, *stats) + }) + } +} + +func TestOTLPLogToPushEntry(t *testing.T) { + now := time.Unix(0, time.Now().UnixNano()) + + for _, tc := range []struct { + name string + buildLogRecord func() plog.LogRecord + expectedResp push.Entry + }{ + { + name: "only body and timestamp set", + buildLogRecord: func() plog.LogRecord { + log := plog.NewLogRecord() + log.Body().SetStr("log body") + log.SetTimestamp(pcommon.Timestamp(now.UnixNano())) + return log + }, + expectedResp: push.Entry{ + Timestamp: now, + Line: "log body", + StructuredMetadata: push.LabelsAdapter{}, + }, + }, + { + name: "all the values set", + buildLogRecord: func() plog.LogRecord { + log := plog.NewLogRecord() + log.Body().SetStr("log body") + log.SetTimestamp(pcommon.Timestamp(now.UnixNano())) + log.SetObservedTimestamp(pcommon.Timestamp(now.UnixNano() + 1)) + log.SetSeverityNumber(plog.SeverityNumberDebug) + log.SetSeverityText("debug") + log.SetDroppedAttributesCount(1) + log.SetFlags(plog.DefaultLogRecordFlags.WithIsSampled(true)) + log.SetTraceID([16]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}) + log.SetSpanID([8]byte{0x12, 0x23, 0xAD, 0x12, 0x23, 0xAD, 0x12, 0x23}) + log.Attributes().PutStr("foo", "bar") + + return log + }, + expectedResp: push.Entry{ + Timestamp: now, + Line: "log body", + StructuredMetadata: push.LabelsAdapter{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "observed_timestamp", + Value: fmt.Sprintf("%d", now.UnixNano()+1), + }, + { + Name: "severity_number", + Value: "5", + }, + { + Name: "severity_text", + Value: "debug", + }, + { + Name: "dropped_attributes_count", + Value: "1", + }, + { + Name: "flags", + Value: fmt.Sprintf("%d", plog.DefaultLogRecordFlags.WithIsSampled(true)), + }, + { + Name: "trace_id", + Value: "12345678123456781234567812345678", + }, + { + Name: "span_id", + Value: "1223ad1223ad1223", + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expectedResp, otlpLogToPushEntry(tc.buildLogRecord())) + }) + } + +} + +func TestAttributesToLabels(t *testing.T) { + for _, tc := range []struct { + name string + buildAttrs func() pcommon.Map + expectedResp push.LabelsAdapter + }{ + { + name: "no attributes", + buildAttrs: func() pcommon.Map { + return pcommon.NewMap() + }, + expectedResp: push.LabelsAdapter{}, + }, + { + name: "with attributes", + buildAttrs: func() pcommon.Map { + attrs := pcommon.NewMap() + attrs.PutEmpty("empty") + attrs.PutStr("str", "val") + attrs.PutInt("int", 1) + attrs.PutDouble("double", 3.14) + attrs.PutBool("bool", true) + attrs.PutEmptyBytes("bytes").Append(1, 2, 3) + + slice := attrs.PutEmptySlice("slice") + slice.AppendEmpty().SetInt(1) + slice.AppendEmpty().SetEmptySlice().AppendEmpty().SetStr("foo") + slice.AppendEmpty().SetEmptyMap().PutStr("fizz", "buzz") + + m := attrs.PutEmptyMap("nested") + m.PutStr("foo", "bar") + m.PutEmptyMap("more").PutStr("key", "val") + + return attrs + }, + expectedResp: push.LabelsAdapter{ + { + Name: "empty", + }, + { + Name: "str", + Value: "val", + }, + { + Name: "int", + Value: "1", + }, + { + Name: "double", + Value: "3.14", + }, + { + Name: "bool", + Value: "true", + }, + { + Name: "bytes", + Value: base64.StdEncoding.EncodeToString([]byte{1, 2, 3}), + }, + { + Name: "slice", + Value: `[1,["foo"],{"fizz":"buzz"}]`, + }, + { + Name: "nested_foo", + Value: "bar", + }, + { + Name: "nested_more_key", + Value: "val", + }, + }, + }, + { + name: "attributes 
with special chars", + buildAttrs: func() pcommon.Map { + attrs := pcommon.NewMap() + attrs.PutStr("st.r", "val") + + m := attrs.PutEmptyMap("nest*ed") + m.PutStr("fo@o", "bar") + m.PutEmptyMap("m$ore").PutStr("k_ey", "val") + + return attrs + }, + expectedResp: push.LabelsAdapter{ + { + Name: "st_r", + Value: "val", + }, + { + Name: "nest_ed_fo_o", + Value: "bar", + }, + { + Name: "nest_ed_m_ore_k_ey", + Value: "val", + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expectedResp, attributesToLabels(tc.buildAttrs(), "")) + }) + } +} + +type fakeRetention struct{} + +func (f fakeRetention) RetentionPeriodFor(userID string, lbs labels.Labels) time.Duration { + return time.Hour +} diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go index a6bf07a1aa267..ab6c41a9839ff 100644 --- a/pkg/loghttp/push/push.go +++ b/pkg/loghttp/push/push.go @@ -57,7 +57,80 @@ type TenantsRetention interface { RetentionPeriodFor(userID string, lbs labels.Labels) time.Duration } -func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention) (*logproto.PushRequest, error) { +type RequestParser func(userID string, r *http.Request, tenantsRetention TenantsRetention) (*logproto.PushRequest, *Stats, error) + +type Stats struct { + errs []error + numLines int64 + logLinesBytes map[time.Duration]int64 + structuredMetadataBytes map[time.Duration]int64 + streamLabelsSize int64 + mostRecentEntryTimestamp time.Time + contentType string + contentEncoding string + bodySize int64 +} + +func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention, pushRequestParser RequestParser) (*logproto.PushRequest, error) { + req, pushStats, err := pushRequestParser(userID, r, tenantsRetention) + if err != nil { + return nil, err + } + + var ( + entriesSize int64 + structuredMetadataSize int64 + ) + for retentionPeriod, size := range pushStats.logLinesBytes { + var retentionHours string + if retentionPeriod > 0 { + retentionHours = fmt.Sprintf("%d", int64(math.Floor(retentionPeriod.Hours()))) + } + + bytesIngested.WithLabelValues(userID, retentionHours).Add(float64(size)) + bytesReceivedStats.Inc(size) + entriesSize += size + } + + for retentionPeriod, size := range pushStats.structuredMetadataBytes { + var retentionHours string + if retentionPeriod > 0 { + retentionHours = fmt.Sprintf("%d", int64(math.Floor(retentionPeriod.Hours()))) + } + + structuredMetadataBytesIngested.WithLabelValues(userID, retentionHours).Add(float64(size)) + bytesIngested.WithLabelValues(userID, retentionHours).Add(float64(size)) + bytesReceivedStats.Inc(size) + structuredMetadataBytesReceivedStats.Inc(size) + + entriesSize += size + structuredMetadataSize += size + } + + // incrementing tenant metrics if we have a tenant. 
+ if pushStats.numLines != 0 && userID != "" { + linesIngested.WithLabelValues(userID).Add(float64(pushStats.numLines)) + } + linesReceivedStats.Inc(pushStats.numLines) + + level.Debug(logger).Log( + "msg", "push request parsed", + "path", r.URL.Path, + "contentType", pushStats.contentType, + "contentEncoding", pushStats.contentEncoding, + "bodySize", humanize.Bytes(uint64(pushStats.bodySize)), + "streams", len(req.Streams), + "entries", pushStats.numLines, + "streamLabelsSize", humanize.Bytes(uint64(pushStats.streamLabelsSize)), + "entriesSize", humanize.Bytes(uint64(entriesSize)), + "structuredMetadataSize", humanize.Bytes(uint64(structuredMetadataSize)), + "totalSize", humanize.Bytes(uint64(entriesSize+pushStats.streamLabelsSize)), + "mostRecentLagMs", time.Since(pushStats.mostRecentEntryTimestamp).Milliseconds(), + ) + return req, nil +} + +func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRetention) (*logproto.PushRequest, *Stats, error) { // Body var body io.Reader // bodySize should always reflect the compressed size of the request body @@ -74,7 +147,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete case "gzip": gzipReader, err := gzip.NewReader(bodySize) if err != nil { - return nil, err + return nil, nil, err } defer gzipReader.Close() body = gzipReader @@ -83,21 +156,18 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete defer flateReader.Close() body = flateReader default: - return nil, fmt.Errorf("Content-Encoding %q not supported", contentEncoding) + return nil, nil, fmt.Errorf("Content-Encoding %q not supported", contentEncoding) } contentType := r.Header.Get(contentType) var ( - entriesSize int64 - structuredMetadataSize int64 - streamLabelsSize int64 - totalEntries int64 - req logproto.PushRequest + req logproto.PushRequest + pushStats = newPushStats() ) contentType, _ /* params */, err := mime.ParseMediaType(contentType) if err != nil { - return nil, err + return nil, nil, err } switch contentType { @@ -114,67 +184,44 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete } if err != nil { - return nil, err + return nil, nil, err } default: // When no content-type header is set or when it is set to // `application/x-protobuf`: expect snappy compression. 
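+		// ParseProtoReader caps the body at math.MaxInt32 bytes,
+		// snappy-decodes it (util.RawSnappy), and unmarshals into req.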
if err := util.ParseProtoReader(r.Context(), body, int(r.ContentLength), math.MaxInt32, &req, util.RawSnappy); err != nil { - return nil, err + return nil, nil, err } } - mostRecentEntry := time.Unix(0, 0) + pushStats.bodySize = bodySize.Size() + pushStats.contentType = contentType + pushStats.contentEncoding = contentEncoding for _, s := range req.Streams { - streamLabelsSize += int64(len(s.Labels)) - var retentionHours string + pushStats.streamLabelsSize += int64(len(s.Labels)) + var retentionPeriod time.Duration if tenantsRetention != nil { lbs, err := syntax.ParseLabels(s.Labels) if err != nil { - return nil, fmt.Errorf("couldn't parse labels: %w", err) + return nil, nil, fmt.Errorf("couldn't parse labels: %w", err) } - retentionHours = fmt.Sprintf("%d", int64(math.Floor(tenantsRetention.RetentionPeriodFor(userID, lbs).Hours()))) + retentionPeriod = tenantsRetention.RetentionPeriodFor(userID, lbs) } for _, e := range s.Entries { - totalEntries++ + pushStats.numLines++ var entryLabelsSize int64 for _, l := range e.StructuredMetadata { entryLabelsSize += int64(len(l.Name) + len(l.Value)) } - entrySize := int64(len(e.Line)) + entryLabelsSize - entriesSize += entrySize - structuredMetadataSize += entryLabelsSize - bytesIngested.WithLabelValues(userID, retentionHours).Add(float64(entrySize)) - structuredMetadataBytesIngested.WithLabelValues(userID, retentionHours).Add(float64(entryLabelsSize)) - bytesReceivedStats.Inc(entrySize) - structuredMetadataBytesReceivedStats.Inc(entryLabelsSize) - if e.Timestamp.After(mostRecentEntry) { - mostRecentEntry = e.Timestamp + pushStats.logLinesBytes[retentionPeriod] += int64(len(e.Line)) + pushStats.structuredMetadataBytes[retentionPeriod] += entryLabelsSize + if e.Timestamp.After(pushStats.mostRecentEntryTimestamp) { + pushStats.mostRecentEntryTimestamp = e.Timestamp } } } - // incrementing tenant metrics if we have a tenant. 
- if totalEntries != 0 && userID != "" { - linesIngested.WithLabelValues(userID).Add(float64(totalEntries)) - } - linesReceivedStats.Inc(totalEntries) - - level.Debug(logger).Log( - "msg", "push request parsed", - "path", r.URL.Path, - "contentType", contentType, - "contentEncoding", contentEncoding, - "bodySize", humanize.Bytes(uint64(bodySize.Size())), - "streams", len(req.Streams), - "entries", totalEntries, - "streamLabelsSize", humanize.Bytes(uint64(streamLabelsSize)), - "entriesSize", humanize.Bytes(uint64(entriesSize)), - "structuredMetadataSize", humanize.Bytes(uint64(structuredMetadataSize)), - "totalSize", humanize.Bytes(uint64(entriesSize+streamLabelsSize)), - "mostRecentLagMs", time.Since(mostRecentEntry).Milliseconds(), - ) - return &req, nil + return &req, pushStats, nil } diff --git a/pkg/loghttp/push/push_test.go b/pkg/loghttp/push/push_test.go index 9ca45e72667f6..286b0e013a241 100644 --- a/pkg/loghttp/push/push_test.go +++ b/pkg/loghttp/push/push_test.go @@ -200,7 +200,7 @@ func TestParseRequest(t *testing.T) { request.Header.Add("Content-Encoding", test.contentEncoding) } - data, err := ParseRequest(util_log.Logger, "fake", request, nil) + data, err := ParseRequest(util_log.Logger, "fake", request, nil, ParseLokiRequest) structuredMetadataBytesReceived := int(structuredMetadataBytesReceivedStats.Value()["total"].(int64)) - previousStructuredMetadataBytesReceived previousStructuredMetadataBytesReceived += structuredMetadataBytesReceived diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 432bbd5d51938..f5a46e2164ed4 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -329,10 +329,13 @@ func (t *Loki) initDistributor() (services.Service, error) { tenant.WithDefaultResolver(tenant.NewMultiResolver()) } - pushHandler := middleware.Merge( + httpPushHandlerMiddleware := middleware.Merge( serverutil.RecoveryHTTPMiddleware, t.HTTPAuthMiddleware, - ).Wrap(http.HandlerFunc(t.distributor.PushHandler)) + ) + + lokiPushHandler := httpPushHandlerMiddleware.Wrap(http.HandlerFunc(t.distributor.PushHandler)) + otlpPushHandler := httpPushHandlerMiddleware.Wrap(http.HandlerFunc(t.distributor.OTLPPushHandler)) t.Server.HTTP.Path("/distributor/ring").Methods("GET", "POST").Handler(t.distributor) @@ -340,8 +343,9 @@ func (t *Loki) initDistributor() (services.Service, error) { t.InternalServer.HTTP.Path("/distributor/ring").Methods("GET", "POST").Handler(t.distributor) } - t.Server.HTTP.Path("/api/prom/push").Methods("POST").Handler(pushHandler) - t.Server.HTTP.Path("/loki/api/v1/push").Methods("POST").Handler(pushHandler) + t.Server.HTTP.Path("/api/prom/push").Methods("POST").Handler(lokiPushHandler) + t.Server.HTTP.Path("/loki/api/v1/push").Methods("POST").Handler(lokiPushHandler) + t.Server.HTTP.Path("/otlp/v1/logs").Methods("POST").Handler(otlpPushHandler) return t.distributor, nil } diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json index 2aba84cd04f3a..2c8e1e1e07577 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -1,12 +1,12 @@ { - "bigquery": "1.52.0", - "bigtable": "1.18.1", - "datastore": "1.12.0", + "bigquery": "1.53.0", + "bigtable": "1.19.0", + "datastore": "1.13.0", "errorreporting": "0.3.0", "firestore": "1.11.0", "logging": "1.7.0", "profiler": "0.3.1", - "pubsub": "1.32.0", + "pubsub": "1.33.0", "pubsublite": "1.8.1", "spanner": "1.47.0", "storage": 
"1.31.0" diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index 69bed3de5765c..c02b65f486807 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,10 +1,11 @@ { "accessapproval": "1.7.1", "accesscontextmanager": "1.8.1", - "advisorynotifications": "0.3.1", - "aiplatform": "1.45.0", - "alloydb": "1.2.1", - "analytics": "0.21.2", + "advisorynotifications": "1.0.0", + "ai": "0.1.1", + "aiplatform": "1.48.0", + "alloydb": "1.4.0", + "analytics": "0.21.3", "apigateway": "1.6.1", "apigeeconnect": "1.6.1", "apigeeregistry": "0.7.1", @@ -16,63 +17,64 @@ "assuredworkloads": "1.11.1", "automl": "1.13.1", "baremetalsolution": "1.1.1", - "batch": "1.3.0", - "beyondcorp": "0.6.1", + "batch": "1.3.1", + "beyondcorp": "1.0.0", "billing": "1.16.0", "binaryauthorization": "1.6.1", "certificatemanager": "1.7.1", "channel": "1.16.0", - "cloudbuild": "1.10.1", + "cloudbuild": "1.13.0", "clouddms": "1.6.1", - "cloudtasks": "1.11.1", - "compute": "1.20.1", + "cloudtasks": "1.12.1", + "commerce": "0.1.0", + "compute": "1.23.0", "compute/metadata": "0.2.3", - "confidentialcomputing": "0.3.1", - "contactcenterinsights": "1.9.1", - "container": "1.22.1", + "confidentialcomputing": "1.1.0", + "contactcenterinsights": "1.10.0", + "container": "1.24.0", "containeranalysis": "0.10.1", - "datacatalog": "1.14.1", + "datacatalog": "1.16.0", "dataflow": "0.9.1", "dataform": "0.8.1", "datafusion": "1.7.1", "datalabeling": "0.8.1", - "dataplex": "1.8.1", + "dataplex": "1.9.0", "dataproc": "2.0.1", "dataqna": "0.8.1", - "datastream": "1.9.1", - "deploy": "1.11.0", - "dialogflow": "1.38.0", - "discoveryengine": "0.5.0", + "datastream": "1.10.0", + "deploy": "1.13.0", + "dialogflow": "1.40.0", + "discoveryengine": "1.1.0", "dlp": "1.10.1", - "documentai": "1.20.0", + "documentai": "1.22.0", "domains": "0.9.1", "edgecontainer": "1.1.1", "essentialcontacts": "1.6.2", - "eventarc": "1.12.1", + "eventarc": "1.13.0", "filestore": "1.7.1", "functions": "1.15.1", - "gaming": "1.10.1", "gkebackup": "1.3.0", "gkeconnect": "0.8.1", "gkehub": "0.14.1", - "gkemulticloud": "0.6.1", + "gkemulticloud": "1.0.0", "grafeas": "0.3.1", "gsuiteaddons": "1.6.1", "iam": "1.1.1", "iap": "1.8.1", "ids": "1.4.1", "iot": "1.7.1", - "kms": "1.12.1", + "kms": "1.15.0", "language": "1.10.1", "lifesciences": "0.9.1", "longrunning": "0.5.1", "managedidentities": "1.6.1", - "maps": "1.2.1", + "maps": "1.4.0", "mediatranslation": "0.8.1", "memcache": "1.10.1", - "metastore": "1.11.1", + "metastore": "1.12.0", "migrationcenter": "0.1.0", "monitoring": "1.15.1", + "netapp": "0.1.0", "networkconnectivity": "1.12.1", "networkmanagement": "1.8.0", "networksecurity": "0.9.1", @@ -83,9 +85,10 @@ "osconfig": "1.12.1", "oslogin": "1.10.1", "phishingprotection": "0.8.1", - "policytroubleshooter": "1.7.1", + "policysimulator": "0.1.0", + "policytroubleshooter": "1.8.0", "privatecatalog": "0.9.1", - "rapidmigrationassessment": "0.1.2", + "rapidmigrationassessment": "1.0.0", "recaptchaenterprise": "2.7.2", "recommendationengine": "0.8.1", "recommender": "1.10.1", @@ -93,30 +96,30 @@ "resourcemanager": "1.9.1", "resourcesettings": "1.6.1", "retail": "1.14.1", - "run": "1.1.1", + "run": "1.2.0", "scheduler": "1.10.1", "secretmanager": "1.11.1", "security": "1.15.1", "securitycenter": "1.23.0", "servicecontrol": "1.12.1", - "servicedirectory": "1.10.1", + 
"servicedirectory": "1.11.0", "servicemanagement": "1.9.2", "serviceusage": "1.7.1", "shell": "1.7.1", - "speech": "1.17.1", - "storageinsights": "0.2.2", + "speech": "1.19.0", + "storageinsights": "1.0.0", "storagetransfer": "1.10.0", - "support": "0.2.2", + "support": "1.0.0", "talent": "1.6.2", "texttospeech": "1.7.1", "tpu": "1.6.1", "trace": "1.10.1", - "translate": "1.8.1", - "video": "1.17.1", + "translate": "1.8.2", + "video": "1.19.0", "videointelligence": "1.11.1", "vision": "2.7.2", "vmmigration": "1.7.1", - "vmwareengine": "0.4.1", + "vmwareengine": "1.0.0", "vpcaccess": "1.7.1", "webrisk": "1.9.1", "websecurityscanner": "1.6.1", diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 1d7c78e855f2d..7c95dc1b009fd 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.110.4" + ".": "0.110.7" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 82c0c2f08e4d5..f500b8ee9bf37 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,26 @@ # Changes +## [0.110.7](https://github.com/googleapis/google-cloud-go/compare/v0.110.6...v0.110.7) (2023-07-31) + + +### Bug Fixes + +* **main:** Add more docs to base package ([c401ab4](https://github.com/googleapis/google-cloud-go/commit/c401ab4a576c64ab2b8840a90f7ccd5d031cea57)) + +## [0.110.6](https://github.com/googleapis/google-cloud-go/compare/v0.110.5...v0.110.6) (2023-07-13) + + +### Bug Fixes + +* **httpreplay:** Ignore GCS header by default ([#8260](https://github.com/googleapis/google-cloud-go/issues/8260)) ([b961a1a](https://github.com/googleapis/google-cloud-go/commit/b961a1abe7aeafe420c88eed38035fed0bbf7bbe)), refs [#8233](https://github.com/googleapis/google-cloud-go/issues/8233) + +## [0.110.5](https://github.com/googleapis/google-cloud-go/compare/v0.110.4...v0.110.5) (2023-07-07) + + +### Bug Fixes + +* **logadmin:** Use consistent filter in paging example ([#8221](https://github.com/googleapis/google-cloud-go/issues/8221)) ([9570159](https://github.com/googleapis/google-cloud-go/commit/95701597b1d709543ea22a4b6ff9b28b14a2d4fc)) + ## [0.110.4](https://github.com/googleapis/google-cloud-go/compare/v0.110.3...v0.110.4) (2023-07-05) diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index 6d6e48b65bd72..3a391131aa841 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -125,6 +125,7 @@ variables: bamboo-shift-455) for the general project. - `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general project's service account. +- `GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES`: Comma separated list of developer's Datastore databases. If not provided, default database i.e. empty string is used. - `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project. - `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the @@ -153,8 +154,9 @@ $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID # Authenticates the gcloud tool with your account. $ gcloud auth login -# Create the indexes used in the datastore integration tests. 
-$ gcloud datastore indexes create datastore/testdata/index.yaml +# Create the indexes for all the databases you want to use in the datastore integration tests. +# Use empty string as databaseID or skip database flag for default database. +$ gcloud alpha datastore indexes create --database=your-databaseID-1 --project=$GCLOUD_TESTS_GOLANG_PROJECT_ID testdata/index.yaml # Creates a Google Cloud storage bucket with the same name as your test project, # and with the Cloud Logging service account as owner, for the sink @@ -219,6 +221,10 @@ export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project # The path to the JSON key file of the general project's service account. export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json +# Comma separated list of developer's Datastore databases. If not provided, +# default database i.e. empty string is used. +export GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES=your-database-1,your-database-2 + # Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project. export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index eddfee04b0b3f..6395537003224 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.22.0" +const Version = "1.23.0" diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go index 686caace4faba..d15db660e6a24 100644 --- a/vendor/cloud.google.com/go/doc.go +++ b/vendor/cloud.google.com/go/doc.go @@ -14,27 +14,25 @@ /* Package cloud is the root of the packages used to access Google Cloud -Services. See https://godoc.org/cloud.google.com/go for a full list -of sub-packages. +Services. See https://pkg.go.dev/cloud.google.com/go for a full list +of sub-modules. # Client Options -All clients in sub-packages are configurable via client options. These options are -described here: https://godoc.org/google.golang.org/api/option. +All clients in sub-packages are configurable via client options. These options +are described here: https://pkg.go.dev/google.golang.org/api/option. -## Endpoint Override +# Endpoint Override Endpoint configuration is used to specify the URL to which requests are -sent. It is used for services that support or require regional endpoints, as well -as for other use cases such as [testing against fake -servers](https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes). +sent. It is used for services that support or require regional endpoints, as +well as for other use cases such as [testing against fake servers]. -For example, the Vertex AI service recommends that you configure the endpoint to the -location with the features you want that is closest to your physical location or the -location of your users. There is no global endpoint for Vertex AI. See -[Vertex AI - Locations](https://cloud.google.com/vertex-ai/docs/general/locations) -for more details. The following example demonstrates configuring a Vertex AI client -with a regional endpoint: +For example, the Vertex AI service recommends that you configure the endpoint to +the location with the features you want that is closest to your physical +location or the location of your users. There is no global endpoint for Vertex +AI. 
See [Vertex AI - Locations] for more details. The following example +demonstrates configuring a Vertex AI client with a regional endpoint: ctx := context.Background() endpoint := "us-central1-aiplatform.googleapis.com:443" @@ -42,15 +40,16 @@ with a regional endpoint: # Authentication and Authorization -All the clients in sub-packages support authentication via Google Application Default -Credentials (see https://cloud.google.com/docs/authentication/production), or -by providing a JSON key file for a Service Account. See examples below. +All of the clients support authentication via [Google Application Default Credentials], +or by providing a JSON key file for a Service Account. See examples below. Google Application Default Credentials (ADC) is the recommended way to authorize and authenticate clients. For information on how to create and obtain Application Default Credentials, see -https://cloud.google.com/docs/authentication/production. Here is an example -of a client using ADC to authenticate: +https://cloud.google.com/docs/authentication/production. If you have your +environment configured correctly you will not need to pass any extra information +to the client libraries. Here is an example of a client using ADC to +authenticate: client, err := secretmanager.NewClient(context.Background()) if err != nil { @@ -58,12 +57,11 @@ of a client using ADC to authenticate: } _ = client // Use the client. -You can use a file with credentials to authenticate and authorize, such as a JSON -key file associated with a Google service account. Service Account keys can be -created and downloaded from -https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses -the Secret Manger client, but the same steps apply to the other client libraries -underneath this package. Example: +You can use a file with credentials to authenticate and authorize, such as a +JSON key file associated with a Google service account. Service Account keys can +be created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts. +This example uses the Secret Manger client, but the same steps apply to the +all other client libraries this package as well. Example: client, err := secretmanager.NewClient(context.Background(), option.WithCredentialsFile("/path/to/service-account-key.json")) @@ -74,14 +72,14 @@ underneath this package. Example: In some cases (for instance, you don't want to store secrets on disk), you can create credentials from in-memory JSON and use the WithCredentials option. -The google package in this example is at golang.org/x/oauth2/google. This example uses the Secret Manager client, but the same steps apply to -the other client libraries underneath this package. Note that scopes can be +all other client libraries as well. Note that scopes can be found at https://developers.google.com/identity/protocols/oauth2/scopes, and are also provided in all auto-generated libraries: for example, cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: ctx := context.Background() + // https://pkg.go.dev/golang.org/x/oauth2/google creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) if err != nil { // TODO: handle error. @@ -97,10 +95,11 @@ cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: By default, non-streaming methods, like Create or Get, will have a default deadline applied to the context provided at call time, unless a context deadline is already set. 
Streaming methods have no default deadline and will run -indefinitely. To set timeouts or arrange for cancellation, use contexts. -Transient errors will be retried when correctness allows. +indefinitely. To set timeouts or arrange for cancellation, use +[context]. Transient errors will be retried when correctness allows. -Here is an example of setting a timeout for an RPC using context.WithTimeout: +Here is an example of setting a timeout for an RPC using +[context.WithTimeout]: ctx := context.Background() // Do not set a timeout on the context passed to NewClient: dialing happens @@ -119,7 +118,8 @@ Here is an example of setting a timeout for an RPC using context.WithTimeout: // TODO: handle error. } -Here is an example of setting a timeout for an RPC using gax.WithTimeout: +Here is an example of setting a timeout for an RPC using +[github.com/googleapis/gax-go/v2.WithTimeout]: ctx := context.Background() // Do not set a timeout on the context passed to NewClient: dialing happens @@ -136,7 +136,8 @@ Here is an example of setting a timeout for an RPC using gax.WithTimeout: // TODO: handle error. } -Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel: +Here is an example of how to arrange for an RPC to be canceled, use +[context.WithCancel]: ctx := context.Background() // Do not cancel the context passed to NewClient: dialing happens asynchronously, @@ -155,10 +156,10 @@ Here is an example of how to arrange for an RPC to be canceled, use context.With // TODO: handle error. } -Do not attempt to control the initial connection (dialing) of a service by setting a -timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts -would be ineffective and would only interfere with credential refreshing, which uses -the same context. +Do not attempt to control the initial connection (dialing) of a service by +setting a timeout on the context passed to NewClient. Dialing is non-blocking, +so timeouts would be ineffective and would only interfere with credential +refreshing, which uses the same context. # Connection Pooling @@ -166,42 +167,42 @@ Connection pooling differs in clients based on their transport. Cloud clients either rely on HTTP or gRPC transports to communicate with Google Cloud. -Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the -underlying HTTP transport to cache connections for later re-use. These are cached to -the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in -http.DefaultTransport. +Cloud clients that use HTTP rely on the underlying HTTP transport to cache +connections for later re-use. These are cached to the http.MaxIdleConns +and http.MaxIdleConnsPerHost settings in http.DefaultTransport by default. -For gRPC clients (all others in this repo), connection pooling is configurable. Users -of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client -option to NewClient calls. This configures the underlying gRPC connections to be -pooled and addressed in a round robin fashion. +For gRPC clients, connection pooling is configurable. Users of Cloud Client +Libraries may specify option.WithGRPCConnectionPool(n) as a client option to +NewClient calls. This configures the underlying gRPC connections to be pooled +and accessed in a round robin fashion. -# Using the Libraries with Docker +# Using the Libraries in Container environments(Docker) -Minimal docker images like Alpine lack CA certificates. 
-# Using the Libraries with Docker
+# Using the Libraries in Container environments (Docker)

-Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
-hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
-for more information.
+Minimal container images like Alpine lack CA certificates. This causes RPCs to
+appear to hang, because gRPC retries indefinitely. See
+https://github.com/googleapis/google-cloud-go/issues/928 for more information.

# Debugging

-To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
-https://godoc.org/google.golang.org/grpc/grpclog for more information.
+For tips on how to debug issues that come up while using the client libraries,
+check out our [Debugging Guide].

-For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
+# Testing
+
+For tips on how to write tests against code that calls into our libraries check
+out our [Testing Guide].

# Inspecting errors

Most of the errors returned by the generated clients are wrapped in an
[github.com/googleapis/gax-go/v2/apierror.APIError] and can be further unwrapped
into a [google.golang.org/grpc/status.Status] or
-[google.golang.org/api/googleapi.Error] depending
-on the transport used to make the call (gRPC or REST). Converting your errors to
-these types can be a useful way to get more information about what went wrong
-while debugging.
+[google.golang.org/api/googleapi.Error] depending on the transport used to make
+the call (gRPC or REST). Converting your errors to these types can be a useful
+way to get more information about what went wrong while debugging.

-[github.com/googleapis/gax-go/v2/apierror.APIError] gives access to specific
-details in the error. The transport-specific errors can still be unwrapped using
-the [github.com/googleapis/gax-go/v2/apierror.APIError].
+APIError gives access to specific details in the error. The transport-specific
+errors can still be unwrapped using the APIError.

    if err != nil {
        var ae *apierror.APIError
@@ -223,36 +224,33 @@ still be parsed using the [google.golang.org/grpc/status.FromError] function.
        }
    }

-If the REST transport was used, the [google.golang.org/api/googleapi.Error] can
-be parsed in a similar way, allowing access to details such as the HTTP response
-code.
-
-    if err != nil {
-        var gerr *googleapi.Error
-        if errors.As(err, &gerr) {
-            log.Println(gerr.Message)
-        }
-    }
-
# Client Stability

-Clients in this repository are considered alpha or beta unless otherwise
-marked as stable in the README.md. Semver is not used to communicate stability
-of clients.
+Semver is used to communicate stability of the sub-modules of this package.
+Note that some stable sub-modules do contain packages, and sometimes features,
+that are considered unstable. If something is unstable it will be explicitly
+labeled as such. Example of the notice used in an unstable package:
+
+    NOTE: This package is in beta. It is not stable, and may be subject to changes.

-Alpha and beta clients may change or go away without notice.
+Clients that contain alpha and beta in their import path may change or go away
+without notice.

Clients marked stable will maintain compatibility with future versions for as
long as we can reasonably sustain. Incompatible changes might be made in some
situations, including:

-- Security bugs may prompt backwards-incompatible changes.
-
-- Situations in which components are no longer feasible to maintain without
-making breaking changes, including removal.
-
-- Parts of the client surface may be outright unstable and subject to change.
-These parts of the surface will be labeled with the note, "It is EXPERIMENTAL -and subject to change or removal without notice." + - Security bugs may prompt backwards-incompatible changes. + - Situations in which components are no longer feasible to maintain without + making breaking changes, including removal. + - Parts of the client surface may be outright unstable and subject to change. + These parts of the surface will be labeled with the note, "It is EXPERIMENTAL + and subject to change or removal without notice." + +[testing against fake servers]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes +[Vertex AI - Locations]: https://cloud.google.com/vertex-ai/docs/general/locations +[Google Application Default Credentials]: https://cloud.google.com/docs/authentication/external/set-up-adc +[Debugging Guide]: https://github.com/googleapis/google-cloud-go/blob/main/debug.md +[Testing Guide]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md */ package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 6248902a57377..95b54b1378d9d 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -26,6 +26,16 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/advisorynotifications/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/ai/generativelanguage/apiv1beta2": { + "api_shortname": "generativelanguage", + "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta2", + "description": "Generative Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta2", "release_level": "preview", "library_type": "GAPIC_AUTO" }, @@ -79,6 +89,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/alloydb/connectors/apiv1alpha": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "api_shortname": "analyticsadmin", "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", @@ -266,7 +286,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { @@ -276,7 +296,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/appgateways/apiv1": { @@ -286,7 
+306,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { @@ -296,7 +316,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/clientgateways/apiv1": { @@ -306,7 +326,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery": { @@ -326,7 +346,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/connection/apiv1": { @@ -366,7 +386,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { @@ -535,7 +555,7 @@ "description": "Cloud Build API", "language": "go", "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/cloudbuild/apiv1/v2", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2", "release_level": "stable", "library_type": "GAPIC_AUTO" }, @@ -589,6 +609,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/commerce/consumer/procurement/apiv1": { + "api_shortname": "cloudcommerceconsumerprocurement", + "distribution_name": "cloud.google.com/go/commerce/consumer/procurement/apiv1", + "description": "Cloud Commerce Consumer Procurement API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/compute/apiv1": { "api_shortname": "compute", "distribution_name": "cloud.google.com/go/compute/apiv1", @@ -616,7 +646,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/confidentialcomputing/apiv1alpha1": { @@ -686,7 +716,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/lineage/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataflow/apiv1beta3": { @@ -816,13 +846,13 
@@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", - "release_level": "stable", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/deploy/apiv1": { "api_shortname": "clouddeploy", "distribution_name": "cloud.google.com/go/deploy/apiv1", - "description": "Google Cloud Deploy API", + "description": "Cloud Deploy API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", @@ -876,7 +906,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/discoveryengine/apiv1beta": { @@ -986,7 +1016,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/filestore/apiv1": { @@ -1059,26 +1089,6 @@ "release_level": "preview", "library_type": "CORE" }, - "cloud.google.com/go/gaming/apiv1": { - "api_shortname": "gameservices", - "distribution_name": "cloud.google.com/go/gaming/apiv1", - "description": "Game Services API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/gaming/apiv1beta": { - "api_shortname": "gameservices", - "distribution_name": "cloud.google.com/go/gaming/apiv1beta", - "description": "Game Services API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/gkebackup/apiv1": { "api_shortname": "gkebackup", "distribution_name": "cloud.google.com/go/gkebackup/apiv1", @@ -1116,7 +1126,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gsuiteaddons/apiv1": { @@ -1316,7 +1326,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/routing/apiv2": { @@ -1429,6 +1439,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/netapp/apiv1": { + "api_shortname": "netapp", + "distribution_name": "cloud.google.com/go/netapp/apiv1", + "description": "NetApp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/networkconnectivity/apiv1": { "api_shortname": "networkconnectivity", 
"distribution_name": "cloud.google.com/go/networkconnectivity/apiv1", @@ -1599,6 +1619,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/policysimulator/apiv1": { + "api_shortname": "policysimulator", + "distribution_name": "cloud.google.com/go/policysimulator/apiv1", + "description": "Policy Simulator API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/policytroubleshooter/apiv1": { "api_shortname": "policytroubleshooter", "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", @@ -1676,7 +1706,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/rapidmigrationassessment/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { @@ -2046,7 +2076,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storage": { @@ -2096,7 +2126,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/support/latest/apiv2", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4": { @@ -2186,7 +2216,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/video/stitcher/apiv1", - "release_level": "stable", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/transcoder/apiv1": { @@ -2266,7 +2296,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmwareengine/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vpcaccess/apiv1": { diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md index 06c4ddaa6261a..6f95c40ce5219 100644 --- a/vendor/cloud.google.com/go/pubsub/CHANGES.md +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -1,5 +1,17 @@ # Changes +## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.32.0...pubsub/v1.33.0) (2023-07-24) + + +### Features + +* **pubsub:** Support payload wrapping for push subs ([#8292](https://github.com/googleapis/google-cloud-go/issues/8292)) ([fd49db5](https://github.com/googleapis/google-cloud-go/commit/fd49db50cd333a2c918f6a1c94f779b8936876fc)) + + +### Bug Fixes + +* **pubsub/pstest:** Update maxMessageRetentionDuration to be 31 days ([#8199](https://github.com/googleapis/google-cloud-go/issues/8199)) ([1fa4bb8](https://github.com/googleapis/google-cloud-go/commit/1fa4bb8b3f22aad0b97ccae610408720522a8b31)) + ## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.31.0...pubsub/v1.32.0) (2023-06-27) diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go 
b/vendor/cloud.google.com/go/pubsub/internal/version.go index acc180691aa63..ba70a43673b7c 100644 --- a/vendor/cloud.google.com/go/pubsub/internal/version.go +++ b/vendor/cloud.google.com/go/pubsub/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.32.0" +const Version = "1.33.0" diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go index 4a9f09bd83bf2..b2cf82e00a6c0 100644 --- a/vendor/cloud.google.com/go/pubsub/subscription.go +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -148,6 +148,10 @@ type PushConfig struct { // This field is optional and should be set only by users interested in // authenticated push. AuthenticationMethod AuthenticationMethod + + // The format of the delivered message to the push endpoint is defined by + // the chosen wrapper. When unset, `PubsubWrapper` is used. + Wrapper Wrapper } func (pc *PushConfig) toProto() *pb.PushConfig { @@ -165,12 +169,19 @@ func (pc *PushConfig) toProto() *pb.PushConfig { default: // TODO: add others here when GAIC adds more definitions. } } + if w := pc.Wrapper; w != nil { + switch wt := w.(type) { + case *PubsubWrapper: + pbCfg.Wrapper = wt.toProto() + case *NoWrapper: + pbCfg.Wrapper = wt.toProto() + default: + } + } return pbCfg } -// AuthenticationMethod is used by push points to verify the source of push requests. -// This interface defines fields that are part of a closed alpha that may not be accessible -// to all users. +// AuthenticationMethod is used by push subscriptions to verify the source of push requests. type AuthenticationMethod interface { isAuthMethod() bool } @@ -212,6 +223,49 @@ func (oidcToken *OIDCToken) toProto() *pb.PushConfig_OidcToken_ { } } +// Wrapper defines the format of message delivered to push endpoints. +type Wrapper interface { + isWrapper() bool +} + +// PubsubWrapper denotes sending the payload to the push endpoint in the form of the JSON +// representation of a PubsubMessage +// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). +type PubsubWrapper struct{} + +var _ Wrapper = (*PubsubWrapper)(nil) + +func (p *PubsubWrapper) isWrapper() bool { return true } + +func (p *PubsubWrapper) toProto() *pb.PushConfig_PubsubWrapper_ { + if p == nil { + return nil + } + return &pb.PushConfig_PubsubWrapper_{ + PubsubWrapper: &pb.PushConfig_PubsubWrapper{}, + } +} + +// NoWrapper denotes not wrapping the payload sent to the push endpoint. +type NoWrapper struct { + WriteMetadata bool +} + +var _ Wrapper = (*NoWrapper)(nil) + +func (n *NoWrapper) isWrapper() bool { return true } + +func (n *NoWrapper) toProto() *pb.PushConfig_NoWrapper_ { + if n == nil { + return nil + } + return &pb.PushConfig_NoWrapper_{ + NoWrapper: &pb.PushConfig_NoWrapper{ + WriteMetadata: n.WriteMetadata, + }, + } +} + // BigQueryConfigState denotes the possible states for a BigQuery Subscription. 
type BigQueryConfigState int @@ -648,6 +702,16 @@ func protoToPushConfig(pbPc *pb.PushConfig) *PushConfig { } } } + if w := pbPc.Wrapper; w != nil { + switch wt := w.(type) { + case *pb.PushConfig_PubsubWrapper_: + pc.Wrapper = &PubsubWrapper{} + case *pb.PushConfig_NoWrapper_: + pc.Wrapper = &NoWrapper{ + WriteMetadata: wt.NoWrapper.WriteMetadata, + } + } + } return pc } diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index 44b40608de409..85a0ffcf59f07 100644 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -12,6 +12,9 @@ "advisorynotifications": { "component": "advisorynotifications" }, + "ai": { + "component": "ai" + }, "aiplatform": { "component": "aiplatform" }, @@ -81,6 +84,9 @@ "cloudtasks": { "component": "cloudtasks" }, + "commerce": { + "component": "commerce" + }, "compute": { "component": "compute" }, @@ -159,9 +165,6 @@ "functions": { "component": "functions" }, - "gaming": { - "component": "gaming" - }, "gkebackup": { "component": "gkebackup" }, @@ -225,6 +228,9 @@ "monitoring": { "component": "monitoring" }, + "netapp": { + "component": "netapp" + }, "networkconnectivity": { "component": "networkconnectivity" }, @@ -255,6 +261,9 @@ "phishingprotection": { "component": "phishingprotection" }, + "policysimulator": { + "component": "policysimulator" + }, "policytroubleshooter": { "component": "policytroubleshooter" }, diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go new file mode 100644 index 0000000000000..e6a862dd90be6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy +// extension point. See the :ref:`load balancing architecture overview +// ` for more information. +type LeastRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. 
Defaults to 2 so that we perform two-choice selection if the field is not set. + ChoiceCount *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. + // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + ActiveRequestBias *v3.RuntimeDouble `protobuf:"bytes,2,opt,name=active_request_bias,json=activeRequestBias,proto3" json:"active_request_bias,omitempty"` + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be not enabled. + SlowStartConfig *v31.SlowStartConfig `protobuf:"bytes,3,opt,name=slow_start_config,json=slowStartConfig,proto3" json:"slow_start_config,omitempty"` + // Configuration for local zone aware load balancing or locality weighted load balancing. + LocalityLbConfig *v31.LocalityLbConfig `protobuf:"bytes,4,opt,name=locality_lb_config,json=localityLbConfig,proto3" json:"locality_lb_config,omitempty"` +} + +func (x *LeastRequest) Reset() { + *x = LeastRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeastRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeastRequest) ProtoMessage() {} + +func (x *LeastRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeastRequest.ProtoReflect.Descriptor instead. 
+func (*LeastRequest) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP(), []int{0} +} + +func (x *LeastRequest) GetChoiceCount() *wrappers.UInt32Value { + if x != nil { + return x.ChoiceCount + } + return nil +} + +func (x *LeastRequest) GetActiveRequestBias() *v3.RuntimeDouble { + if x != nil { + return x.ActiveRequestBias + } + return nil +} + +func (x *LeastRequest) GetSlowStartConfig() *v31.SlowStartConfig { + if x != nil { + return x.SlowStartConfig + } + return nil +} + +func (x *LeastRequest) GetLocalityLbConfig() *v31.LocalityLbConfig { + if x != nil { + return x.LocalityLbConfig + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc = []byte{ + 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x65, 0x61, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x03, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, + 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x71, 
0x75, 0x65, 0x73, + 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, + 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x42, 0x69, 0x61, 0x73, 0x12, 0x6f, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x72, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xd8, 0x01, 0x0a, 0x47, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x70, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x65, 0x61, + 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData = file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc +) + +func 
file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes = []interface{}{ + (*LeastRequest)(nil), // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest + (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*v3.RuntimeDouble)(nil), // 2: envoy.config.core.v3.RuntimeDouble + (*v31.SlowStartConfig)(nil), // 3: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + (*v31.LocalityLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig +} +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.choice_count:type_name -> google.protobuf.UInt32Value + 2, // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 3, // 2: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.slow_start_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + 4, // 3: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.locality_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_init() +} +func file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_init() { + if File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeastRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs, + MessageInfos: 
file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto = out.File + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go new file mode 100644 index 0000000000000..344e831f6044a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go @@ -0,0 +1,237 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LeastRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LeastRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LeastRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LeastRequestMultiError, or +// nil if none found. 
+func (m *LeastRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *LeastRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetChoiceCount(); wrapper != nil { + + if wrapper.GetValue() < 2 { + err := LeastRequestValidationError{ + field: "ChoiceCount", + reason: "value must be greater than or equal to 2", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetActiveRequestBias()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveRequestBias()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSlowStartConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalityLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return LeastRequestMultiError(errors) + } + + return nil +} + +// LeastRequestMultiError is an error wrapping multiple validation errors +// returned by LeastRequest.ValidateAll() if the designated constraints aren't met. +type LeastRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LeastRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m LeastRequestMultiError) AllErrors() []error { return m } + +// LeastRequestValidationError is the validation error returned by +// LeastRequest.Validate if the designated constraints aren't met. +type LeastRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LeastRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LeastRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LeastRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LeastRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LeastRequestValidationError) ErrorName() string { return "LeastRequestValidationError" } + +// Error satisfies the builtin error interface +func (e LeastRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLeastRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LeastRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LeastRequestValidationError{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/LICENSE new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/Makefile new file mode 100644 index 0000000000000..bdd863a203be8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/Makefile @@ -0,0 +1 @@ +include ../../../Makefile.Common diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/README.md new file mode 100644 index 0000000000000..4e88796845ffc --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/README.md @@ -0,0 +1,115 @@ +# Prometheus Normalization + +[OpenTelemetry's metric semantic convention](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md) is not compatible with [Prometheus' own metrics naming convention](https://prometheus.io/docs/practices/naming/). This module provides centralized functions to convert OpenTelemetry metrics to Prometheus-compliant metrics. These functions are used by the following components for Prometheus: + +* [prometheusreceiver](../../../receiver/prometheusreceiver/) +* [prometheusexporter](../../../exporter/prometheusexporter/) +* [prometheusremotewriteexporter](../../../exporter/prometheusremotewriteexporter/) + +## Metric name + +### Full normalization + +> **Warning** +> +> This feature can be controlled with [feature gate](https://github.com/open-telemetry/opentelemetry-collector/tree/main/featuregate) `pkg.translator.prometheus.NormalizeName`. It is currently enabled by default (beta stage). +> +> Example of how to disable it: +> ```shell-session +> $ otelcol --config=config.yaml --feature-gates=-pkg.translator.prometheus.NormalizeName +> ``` + +#### List of transformations to convert OpenTelemetry metrics to Prometheus metrics + +| Case | Transformation | Example | +|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| Unsupported characters and extraneous underscores | Replace unsupported characters with underscores (`_`). Drop redundant, leading and trailing underscores. 
| `(lambda).function.executions(#)` → `lambda_function_executions` |
+| Standard unit | Convert the unit from [Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) to Prometheus standard and append | `system.filesystem.usage` with unit `By` → `system_filesystem_usage_bytes` |
+| Non-standard unit (unit is surrounded with `{}`) | Drop the unit | `system.network.dropped` with unit `{packets}` → `system_network_dropped` |
+| Non-standard unit (unit is **not** surrounded with `{}`) | Append the unit, if not already present, after sanitization (all non-alphanumeric chars are dropped) | `system.network.dropped` with unit `packets` → `system_network_dropped_packets` |
+| Percentages (unit is `1`) | Append `_ratio` (for gauges only) | `system.memory.utilization` with unit `1` → `system_memory_utilization_ratio` |
+| Percentages (unit is `%`) | Replace `%` with `percent` and append | `storage.filesystem.utilization` with unit `%` → `storage_filesystem_utilization_percent` |
+| Rates (unit contains `/`) | Replace `/` with `per` | `astro.light.speed` with unit `m/s` → `astro_light_speed_meters_per_second` |
+| Counter | Append `_total` | `system.processes.created` → `system_processes_created_total` |
+
+List of standard OpenTelemetry units that will be translated to [Prometheus standard base units](https://prometheus.io/docs/practices/naming/#base-units):
+
+| OpenTelemetry Unit | Corresponding Prometheus Unit |
+| ------------------ | ----------------------------- |
+| **Time** | |
+| `d` | `days` |
+| `h` | `hours` |
+| `min` | `minutes` |
+| `s` | `seconds` |
+| `ms` | `milliseconds` |
+| `us` | `microseconds` |
+| `ns` | `nanoseconds` |
+| **Bytes** | |
+| `By` | `bytes` |
+| `KiBy` | `kibibytes` |
+| `MiBy` | `mebibytes` |
+| `GiBy` | `gibibytes` |
+| `TiBy` | `tibibytes` |
+| `KBy` | `kilobytes` |
+| `MBy` | `megabytes` |
+| `GBy` | `gigabytes` |
+| `TBy` | `terabytes` |
+| **SI Units** | |
+| `m` | `meters` |
+| `V` | `volts` |
+| `A` | `amperes` |
+| `J` | `joules` |
+| `W` | `watts` |
+| `g` | `grams` |
+| **Misc.** | |
+| `Cel` | `celsius` |
+| `Hz` | `hertz` |
+| `%` | `percent` |
+
+> **Note**
+> Prometheus also recommends using base units (no kilobytes or milliseconds, for example), but these functions will not attempt to convert non-base units to base units.
+
+#### List of transformations performed to convert Prometheus metrics to OpenTelemetry metrics
+
+| Case | Transformation | Example |
+|------------------------------------|------------------------------------------------------------------------|---------------------------------------------------------------------------------|
+| UNIT defined in OpenMetrics format | Drop the unit suffix and set it in the OpenTelemetry metric unit field | `system_network_dropped_packets` → `system_network_dropped` with `packets` unit |
+| Counter | Drop the `_total` suffix | `system_processes_created_total` → `system_processes_created` |
+
+### Simple normalization
+
+If the feature gate `pkg.translator.prometheus.NormalizeName` is not enabled, a simple sanitization of the OpenTelemetry metric name is performed to ensure it follows Prometheus naming conventions:
+
+* Replace unsupported characters with underscores (`_`)
+* Remove redundant, leading, and trailing underscores
+* Ensure the metric name doesn't start with a digit by prefixing it with an underscore
+
+No processing of the unit is performed, and `_total` is not appended for *Counters*.
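+
+As a rough sketch of how the rules above compose, the example below builds a
+monotonic Sum with a UCUM unit and runs it through this module's
+`BuildCompliantName` function; the expected output assumes the
+`pkg.translator.prometheus.NormalizeName` gate is enabled (its beta default):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+)
+
+func main() {
+	// A monotonic Sum ("counter") with a dotted name and a UCUM unit.
+	m := pmetric.NewMetric()
+	m.SetName("system.filesystem.usage")
+	m.SetUnit("By")
+	m.SetEmptySum().SetIsMonotonic(true)
+
+	// The unit is translated ("By" -> "bytes") and "_total" is appended
+	// for the monotonic Sum: system_filesystem_usage_bytes_total
+	fmt.Println(prometheus.BuildCompliantName(m, "", true))
+}
+```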
+
+## Labels
+
+OpenTelemetry *Attributes* are converted to Prometheus labels and normalized to follow the [Prometheus labels naming rules](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+
+The following transformations are performed on OpenTelemetry *Attributes* to produce Prometheus labels:
+
+* Replace unsupported characters with underscores (`_`)
+* Prefix the label with `key_` if it doesn't start with a letter, unless it already starts with a double underscore (`__`)
+
+By default, labels that start with a simple underscore (`_`) are prefixed with `key`, which is not strictly necessary to comply with the Prometheus label naming rules. This behavior can be disabled by enabling the `pkg.translator.prometheus.PermissiveLabelSanitization` feature gate of the collector:
+
+```shell-session
+$ otelcol --config=config.yaml --feature-gates=pkg.translator.prometheus.PermissiveLabelSanitization
+```
+
+Examples:
+
+| OpenTelemetry Attribute | Prometheus Label |
+|---|---|
+| `name` | `name` |
+| `host.name` | `host_name` |
+| `host_name` | `host_name` |
+| `name (of the host)` | `name__of_the_host_` |
+| `2 cents` | `key_2_cents` |
+| `__name` | `__name` |
+| `_name` | `key_name` |
+| `_name` | `_name` (if `PermissiveLabelSanitization` is enabled) |
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/metadata.yaml
new file mode 100644
index 0000000000000..ad3618bcdf56b
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/metadata.yaml
@@ -0,0 +1,3 @@
+status:
+  codeowners:
+    active: [dashpole, bertysentry]
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_label.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_label.go
new file mode 100644
index 0000000000000..af0960e862373
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_label.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
+
+import (
+	"strings"
+	"unicode"
+
+	"go.opentelemetry.io/collector/featuregate"
+)
+
+var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister(
+	"pkg.translator.prometheus.PermissiveLabelSanitization",
+	featuregate.StageAlpha,
+	featuregate.WithRegisterDescription("Controls whether to change labels starting with '_' to 'key_'."),
+	featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"),
+)
+
+// NormalizeLabel normalizes the specified label to follow the Prometheus label name standard.
+//
+// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
+//
+// Labels that start with a non-letter rune will be prefixed with "key_".
+//
+// An exception is made for double underscores, which are allowed.
+func NormalizeLabel(label string) string {
+
+	// Trivial case
+	if len(label) == 0 {
+		return label
+	}
+
+	// Replace all non-alphanumeric runes with underscores
+	label = strings.Map(sanitizeRune, label)
+
+	// If label
starts with a number, prepend with "key_" + if unicode.IsDigit(rune(label[0])) { + label = "key_" + label + } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") && !dropSanitizationGate.IsEnabled() { + label = "key" + label + } + + return label +} + +// Return '_' for anything non-alphanumeric +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + return '_' +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go new file mode 100644 index 0000000000000..72fc04cea220c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go @@ -0,0 +1,280 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" + +import ( + "strings" + "unicode" + + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +// The map to translate OTLP units to Prometheus units +// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html +// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) +// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units +// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units +var unitMap = map[string]string{ + + // Time + "d": "days", + "h": "hours", + "min": "minutes", + "s": "seconds", + "ms": "milliseconds", + "us": "microseconds", + "ns": "nanoseconds", + + // Bytes + "By": "bytes", + "KiBy": "kibibytes", + "MiBy": "mebibytes", + "GiBy": "gibibytes", + "TiBy": "tibibytes", + "KBy": "kilobytes", + "MBy": "megabytes", + "GBy": "gigabytes", + "TBy": "terabytes", + + // SI + "m": "meters", + "V": "volts", + "A": "amperes", + "J": "joules", + "W": "watts", + "g": "grams", + + // Misc + "Cel": "celsius", + "Hz": "hertz", + "1": "", + "%": "percent", +} + +// The map that translates the "per" unit +// Example: s => per second (singular) +var perUnitMap = map[string]string{ + "s": "second", + "m": "minute", + "h": "hour", + "d": "day", + "w": "week", + "mo": "month", + "y": "year", +} + +var normalizeNameGate = featuregate.GlobalRegistry().MustRegister( + "pkg.translator.prometheus.NormalizeName", + featuregate.StageBeta, + featuregate.WithRegisterDescription("Controls whether metrics names are automatically normalized to follow Prometheus naming convention"), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"), +) + +// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric +// +// Metric name is prefixed with specified namespace and underscore (if any). +// Namespace is not cleaned up. Make sure specified namespace follows Prometheus +// naming convention. 
+// +// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels +// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming +func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { + var metricName string + + // Full normalization following standard Prometheus naming conventions + if addMetricSuffixes && normalizeNameGate.IsEnabled() { + return normalizeName(metric, namespace) + } + + // Simple case (no full normalization, no units, etc.), we simply trim out forbidden chars + metricName = RemovePromForbiddenRunes(metric.Name()) + + // Namespace? + if namespace != "" { + return namespace + "_" + metricName + } + + // Metric name starts with a digit? Prefix it with an underscore + if metricName != "" && unicode.IsDigit(rune(metricName[0])) { + metricName = "_" + metricName + } + + return metricName +} + +// Build a normalized name for the specified metric +func normalizeName(metric pmetric.Metric, namespace string) string { + + // Split metric name in "tokens" (remove all non-alphanumeric) + nameTokens := strings.FieldsFunc( + metric.Name(), + func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }, + ) + + // Split unit at the '/' if any + unitTokens := strings.SplitN(metric.Unit(), "/", 2) + + // Main unit + // Append if not blank, doesn't contain '{}', and is not present in metric name already + if len(unitTokens) > 0 { + mainUnitOtel := strings.TrimSpace(unitTokens[0]) + if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") { + mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel)) + if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) { + nameTokens = append(nameTokens, mainUnitProm) + } + } + + // Per unit + // Append if not blank, doesn't contain '{}', and is not present in metric name already + if len(unitTokens) > 1 && unitTokens[1] != "" { + perUnitOtel := strings.TrimSpace(unitTokens[1]) + if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") { + perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel)) + if perUnitProm != "" && !contains(nameTokens, perUnitProm) { + nameTokens = append(append(nameTokens, "per"), perUnitProm) + } + } + } + + } + + // Append _total for Counters + if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { + nameTokens = append(removeItem(nameTokens, "total"), "total") + } + + // Append _ratio for metrics with unit "1" + // Some Otel receivers improperly use unit "1" for counters of objects + // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions + // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY + // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) + if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { + nameTokens = append(removeItem(nameTokens, "ratio"), "ratio") + } + + // Namespace? + if namespace != "" { + nameTokens = append([]string{namespace}, nameTokens...) + } + + // Build the string from the tokens, separated with underscores + normalizedName := strings.Join(nameTokens, "_") + + // Metric name cannot start with a digit, so prefix it with "_" in this case + if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { + normalizedName = "_" + normalizedName + } + + return normalizedName +} + +// TrimPromSuffixes trims type and unit prometheus suffixes from a metric name. 
+// Following the [OpenTelemetry specs] for converting Prometheus Metric points to OTLP. +// +// [OpenTelemetry specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata +func TrimPromSuffixes(promName string, metricType pmetric.MetricType, unit string) string { + nameTokens := strings.Split(promName, "_") + if len(nameTokens) == 1 { + return promName + } + + nameTokens = removeTypeSuffixes(nameTokens, metricType) + nameTokens = removeUnitSuffixes(nameTokens, unit) + + return strings.Join(nameTokens, "_") +} + +func removeTypeSuffixes(tokens []string, metricType pmetric.MetricType) []string { + switch metricType { + case pmetric.MetricTypeSum: + // Only counters are expected to have a type suffix at this point. + // for other types, suffixes are removed during scrape. + return removeSuffix(tokens, "total") + default: + return tokens + } +} + +func removeUnitSuffixes(nameTokens []string, unit string) []string { + l := len(nameTokens) + unitTokens := strings.Split(unit, "_") + lu := len(unitTokens) + + if lu == 0 || l <= lu { + return nameTokens + } + + suffixed := true + for i := range unitTokens { + if nameTokens[l-i-1] != unitTokens[lu-i-1] { + suffixed = false + break + } + } + + if suffixed { + return nameTokens[:l-lu] + } + + return nameTokens +} + +func removeSuffix(tokens []string, suffix string) []string { + l := len(tokens) + if tokens[l-1] == suffix { + return tokens[:l-1] + } + + return tokens +} + +// Clean up specified string so it's Prometheus compliant +func CleanUpString(s string) string { + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") +} + +func RemovePromForbiddenRunes(s string) string { + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") +} + +// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit +// Returns the specified unit if not found in unitMap +func unitMapGetOrDefault(unit string) string { + if promUnit, ok := unitMap[unit]; ok { + return promUnit + } + return unit +} + +// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit +// Returns the specified unit if not found in perUnitMap +func perUnitMapGetOrDefault(perUnit string) string { + if promPerUnit, ok := perUnitMap[perUnit]; ok { + return promPerUnit + } + return perUnit +} + +// Returns whether the slice contains the specified value +func contains(slice []string, value string) bool { + for _, sliceEntry := range slice { + if sliceEntry == value { + return true + } + } + return false +} + +// Remove the specified value from the slice +func removeItem(slice []string, value string) []string { + newSlice := make([]string, 0, len(slice)) + for _, sliceEntry := range slice { + if sliceEntry != value { + newSlice = append(newSlice, sliceEntry) + } + } + return newSlice +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/unit_to_ucum.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/unit_to_ucum.go new file mode 100644 index 0000000000000..b2f2c4f3aa2f5 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/unit_to_ucum.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" + +import "strings" + +var wordToUCUM = map[string]string{ + + // Time + "days": "d", + "hours": "h", + "minutes": "min", + "seconds": "s", + "milliseconds": "ms", + "microseconds": "us", + "nanoseconds": "ns", + + // Bytes + "bytes": "By", + "kibibytes": "KiBy", + "mebibytes": "MiBy", + "gibibytes": "GiBy", + "tibibytes": "TiBy", + "kilobytes": "KBy", + "megabytes": "MBy", + "gigabytes": "GBy", + "terabytes": "TBy", + + // SI + "meters": "m", + "volts": "V", + "amperes": "A", + "joules": "J", + "watts": "W", + "grams": "g", + + // Misc + "celsius": "Cel", + "hertz": "Hz", + "ratio": "1", + "percent": "%", +} + +// The map that translates the "per" unit +// Example: per_second (singular) => /s +var perWordToUCUM = map[string]string{ + "second": "s", + "minute": "m", + "hour": "h", + "day": "d", + "week": "w", + "month": "mo", + "year": "y", +} + +// UnitWordToUCUM converts english unit words to UCUM units: +// https://ucum.org/ucum#section-Alphabetic-Index-By-Symbol +// It also handles rates, such as meters_per_second, by translating the first +// word to UCUM, and the "per" word to UCUM. It joins them with a "/" between. +func UnitWordToUCUM(unit string) string { + unitTokens := strings.SplitN(unit, "_per_", 2) + if len(unitTokens) == 0 { + return "" + } + ucumUnit := wordToUCUMOrDefault(unitTokens[0]) + if len(unitTokens) > 1 && unitTokens[1] != "" { + ucumUnit += "/" + perWordToUCUMOrDefault(unitTokens[1]) + } + return ucumUnit +} + +// wordToUCUMOrDefault retrieves the Prometheus "basic" unit corresponding to +// the specified "basic" unit. Returns the specified unit if not found in +// wordToUCUM. +func wordToUCUMOrDefault(unit string) string { + if promUnit, ok := wordToUCUM[unit]; ok { + return promUnit + } + return unit +} + +// perWordToUCUMOrDefault retrieve the Prometheus "per" unit corresponding to +// the specified "per" unit. Returns the specified unit if not found in perWordToUCUM. +func perWordToUCUMOrDefault(perUnit string) string { + if promPerUnit, ok := perWordToUCUM[perUnit]; ok { + return promPerUnit + } + return perUnit +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/LICENSE b/vendor/go.opentelemetry.io/collector/featuregate/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/Makefile b/vendor/go.opentelemetry.io/collector/featuregate/Makefile
new file mode 100644
index 0000000000000..39734bfaebb22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/Makefile
@@ -0,0 +1 @@
+include ../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/README.md b/vendor/go.opentelemetry.io/collector/featuregate/README.md
new file mode 100644
index 0000000000000..d3e3c802d63b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/README.md
@@ -0,0 +1,77 @@
+# Collector Feature Gates
+
+This package provides a mechanism that allows operators to enable and disable
+experimental or transitional features at deployment time. These flags should
+be able to govern the behavior of the application starting as early as possible
+and should be available to every component such that decisions may be made
+based on flags at the component level.
+
+## Usage
+
+Feature gates must be defined and registered with the global registry in
+an `init()` function. This makes the `Gate` available to be configured and
+queried with the defined [`Stage`](#feature-lifecycle) default value.
+A `Gate` can have a list of associated issues that allow users to refer to
+the issue and report any additional problems or understand the context of the `Gate`.
+Once a `Gate` has been marked as `Stable`, it must have a removal version set via `WithRegisterToVersion`.
+
+```go
+var myFeatureGate = featuregate.GlobalRegistry().MustRegister(
+	"namespaced.uniqueIdentifier",
+	featuregate.StageStable,
+	featuregate.WithRegisterFromVersion("v0.65.0"),
+	featuregate.WithRegisterDescription("A brief description of what the gate controls"),
+	featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/6167"),
+	featuregate.WithRegisterToVersion("v0.70.0"))
+```
+
+The status of the gate may later be checked by interrogating the global
+feature gate registry:
+
+```go
+if myFeatureGate.IsEnabled() {
+	setupNewFeature()
+}
+```
+
+Note that querying the registry takes a read lock and accesses a map, so it
+should be done once and the result cached for local use if repeated checks
+are required. Avoid querying the registry in a loop.
+
+## Controlling Gates
+
+Feature gates can be enabled or disabled via the CLI, with the
+`--feature-gates` flag. When using the CLI flag, gate
+identifiers must be presented as a comma-delimited list. Gate identifiers
+prefixed with `-` will disable the gate and prefixing with `+` or with no
+prefix will enable the gate.
+
+```shell
+otelcol --config=config.yaml --feature-gates=gate1,-gate2,+gate3
+```
+
+This will enable `gate1` and `gate3` and disable `gate2`.
+
+## Feature Lifecycle
+
+Features controlled by a `Gate` should follow a three-stage lifecycle,
+modeled after the [system used by Kubernetes](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages); the stage defaults are illustrated in the sketch after this list:
+
+1. An `alpha` stage where the feature is disabled by default and must be enabled
+   through a `Gate`.
+2. A `beta` stage where the feature has been well tested and is enabled by
+   default but can be disabled through a `Gate`.
+3. A generally available or `stable` stage where the feature is permanently enabled. At this stage
+   the gate should no longer be explicitly used. Disabling the gate will produce an error and
+   explicitly enabling will produce a warning log.
+4. A `stable` feature gate will be removed in the version specified by its `ToVersion` value.
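+
+To make the stage defaults above concrete, here is a minimal sketch; the gate
+IDs and descriptions are hypothetical, and only calls defined by this package
+are used:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/featuregate"
+)
+
+var (
+	// Alpha gates start disabled and must be opted into via --feature-gates.
+	alphaGate = featuregate.GlobalRegistry().MustRegister(
+		"example.alphaFeature", // hypothetical gate ID
+		featuregate.StageAlpha,
+		featuregate.WithRegisterDescription("An experimental feature."),
+	)
+	// Beta gates start enabled but can still be disabled by the operator.
+	betaGate = featuregate.GlobalRegistry().MustRegister(
+		"example.betaFeature", // hypothetical gate ID
+		featuregate.StageBeta,
+		featuregate.WithRegisterDescription("A well-tested feature."),
+	)
+)
+
+func main() {
+	fmt.Println(alphaGate.IsEnabled()) // false until explicitly enabled
+	fmt.Println(betaGate.IsEnabled())  // true unless explicitly disabled
+}
+```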
+
+Features that prove unworkable in the `alpha` stage may be discontinued
+without proceeding to the `beta` stage. Instead, they will proceed to the
+`deprecated` stage, in which the feature is permanently disabled. A feature gate will
+be removed once it has been `deprecated` for at least 2 releases of the collector.
+
+Features that make it to the `beta` stage are intended to reach general availability but may still be discontinued.
+If, after wider use, it is determined that the gate should be discontinued, it will be reverted to the `alpha` stage
+for 2 releases and then proceed to the `deprecated` stage. If instead it is ready for general availability, it will
+proceed to the `stable` stage.
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/flag.go b/vendor/go.opentelemetry.io/collector/featuregate/flag.go
new file mode 100644
index 0000000000000..3ff105d3e69fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/flag.go
@@ -0,0 +1,55 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package featuregate // import "go.opentelemetry.io/collector/featuregate"
+
+import (
+	"flag"
+	"strings"
+
+	"go.uber.org/multierr"
+)
+
+// NewFlag returns a flag.Value that directly applies feature gate statuses to a Registry.
+func NewFlag(reg *Registry) flag.Value {
+	return &flagValue{reg: reg}
+}
+
+// flagValue implements the flag.Value interface and directly applies feature gate statuses to a Registry.
+type flagValue struct {
+	reg *Registry
+}
+
+func (f *flagValue) String() string {
+	var ids []string
+	f.reg.VisitAll(func(g *Gate) {
+		id := g.ID()
+		if !g.IsEnabled() {
+			id = "-" + id
+		}
+		ids = append(ids, id)
+	})
+	return strings.Join(ids, ",")
+}
+
+func (f *flagValue) Set(s string) error {
+	if s == "" {
+		return nil
+	}
+
+	var errs error
+	ids := strings.Split(s, ",")
+	for i := range ids {
+		id := ids[i]
+		val := true
+		switch id[0] {
+		case '-':
+			id = id[1:]
+			val = false
+		case '+':
+			id = id[1:]
+		}
+		errs = multierr.Append(errs, f.reg.Set(id, val))
+	}
+	return errs
+}
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/gate.go b/vendor/go.opentelemetry.io/collector/featuregate/gate.go
new file mode 100644
index 0000000000000..2b6cae31e85b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/gate.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package featuregate // import "go.opentelemetry.io/collector/featuregate"
+
+import "sync/atomic"
+
+// Gate is an immutable object that is owned by the Registry and represents an individual feature that
+// may be enabled or disabled based on the lifecycle state of the feature and CLI flags specified by the user.
+type Gate struct {
+	id           string
+	description  string
+	referenceURL string
+	fromVersion  string
+	toVersion    string
+	stage        Stage
+	enabled      *atomic.Bool
+}
+
+// ID returns the id of the Gate.
+func (g *Gate) ID() string {
+	return g.id
+}
+
+// IsEnabled returns true if the feature described by the Gate is enabled.
+func (g *Gate) IsEnabled() bool {
+	return g.enabled.Load()
+}
+
+// Description returns the description for the Gate.
+func (g *Gate) Description() string {
+	return g.description
+}
+
+// Stage returns the Gate's lifecycle stage.
+func (g *Gate) Stage() Stage {
+	return g.stage
+}
+
+// ReferenceURL returns the URL to the contextual information about the Gate.
+func (g *Gate) ReferenceURL() string {
+	return g.referenceURL
+}
+
+// FromVersion returns the version information for when the Gate was added.
+func (g *Gate) FromVersion() string {
+	return g.fromVersion
+}
+
+// ToVersion returns the version information for when the Gate is in StageStable.
+func (g *Gate) ToVersion() string {
+	return g.toVersion
+}
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/registry.go b/vendor/go.opentelemetry.io/collector/featuregate/registry.go
new file mode 100644
index 0000000000000..e734711082e40
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/registry.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package featuregate // import "go.opentelemetry.io/collector/featuregate"
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"sync/atomic"
+)
+
+var globalRegistry = NewRegistry()
+
+// GlobalRegistry returns the global Registry.
+func GlobalRegistry() *Registry {
+	return globalRegistry
+}
+
+type Registry struct {
+	gates sync.Map
+}
+
+// NewRegistry returns a new empty Registry.
+func NewRegistry() *Registry {
+	return &Registry{}
+}
+
+// RegisterOption allows configuring additional information about a Gate during registration.
+type RegisterOption interface {
+	apply(g *Gate)
+}
+
+type registerOptionFunc func(g *Gate)
+
+func (ro registerOptionFunc) apply(g *Gate) {
+	ro(g)
+}
+
+// WithRegisterDescription adds a description for the Gate.
+func WithRegisterDescription(description string) RegisterOption {
+	return registerOptionFunc(func(g *Gate) {
+		g.description = description
+	})
+}
+
+// WithRegisterReferenceURL adds a URL that has all the contextual information about the Gate.
+func WithRegisterReferenceURL(url string) RegisterOption {
+	return registerOptionFunc(func(g *Gate) {
+		g.referenceURL = url
+	})
+}
+
+// WithRegisterFromVersion is used to set the Gate "FromVersion".
+// The "FromVersion" contains the Collector release when a feature is introduced.
+func WithRegisterFromVersion(fromVersion string) RegisterOption {
+	return registerOptionFunc(func(g *Gate) {
+		g.fromVersion = fromVersion
+	})
+}
+
+// WithRegisterToVersion is used to set the Gate "ToVersion".
+// The "ToVersion", if not empty, contains the last Collector release in which you can still use a feature gate.
+// If the feature stage is either "Deprecated" or "Stable", the "ToVersion" is the Collector release when the feature is removed.
+func WithRegisterToVersion(toVersion string) RegisterOption {
+	return registerOptionFunc(func(g *Gate) {
+		g.toVersion = toVersion
+	})
+}
+
+// MustRegister is like Register but panics if an invalid ID or gate options are provided.
+func (r *Registry) MustRegister(id string, stage Stage, opts ...RegisterOption) *Gate {
+	g, err := r.Register(id, stage, opts...)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// Register registers a Gate and returns it. The returned Gate can be used to check if it is enabled or not.
+func (r *Registry) Register(id string, stage Stage, opts ...RegisterOption) (*Gate, error) {
+	g := &Gate{
+		id:    id,
+		stage: stage,
+	}
+	for _, opt := range opts {
+		opt.apply(g)
+	}
+	switch g.stage {
+	case StageAlpha, StageDeprecated:
+		g.enabled = &atomic.Bool{}
+	case StageBeta, StageStable:
+		enabled := &atomic.Bool{}
+		enabled.Store(true)
+		g.enabled = enabled
+	default:
+		return nil, fmt.Errorf("unknown stage value %q for gate %q", stage, id)
+	}
+	if (g.stage == StageStable || g.stage == StageDeprecated) && g.toVersion == "" {
+		return nil, fmt.Errorf("no removal version set for %v gate %q", g.stage.String(), id)
+	}
+	if _, loaded := r.gates.LoadOrStore(id, g); loaded {
+		return nil, fmt.Errorf("attempted to add pre-existing gate %q", id)
+	}
+	return g, nil
+}
+
+// Set sets the enabled value for a Gate identified by the given id.
+func (r *Registry) Set(id string, enabled bool) error {
+	v, ok := r.gates.Load(id)
+	if !ok {
+		validGates := []string{}
+		r.VisitAll(func(g *Gate) {
+			validGates = append(validGates, g.ID())
+		})
+		return fmt.Errorf("no such feature gate %q. valid gates: %v", id, validGates)
+	}
+	g := v.(*Gate)
+
+	switch g.stage {
+	case StageStable:
+		if !enabled {
+			return fmt.Errorf("feature gate %q is stable, can not be disabled", id)
+		}
+		fmt.Printf("Feature gate %q is stable and already enabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion)
+	case StageDeprecated:
+		if enabled {
+			return fmt.Errorf("feature gate %q is deprecated, can not be enabled", id)
+		}
+		fmt.Printf("Feature gate %q is deprecated and already disabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion)
+	default:
+		g.enabled.Store(enabled)
+	}
+	return nil
+}
+
+// VisitAll visits all the gates in lexicographical order, calling fn for each.
+func (r *Registry) VisitAll(fn func(*Gate)) {
+	var gates []*Gate
+	r.gates.Range(func(key, value any) bool {
+		gates = append(gates, value.(*Gate))
+		return true
+	})
+	sort.Slice(gates, func(i, j int) bool {
+		return gates[i].ID() < gates[j].ID()
+	})
+	for i := range gates {
+		fn(gates[i])
+	}
+}
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/stage.go b/vendor/go.opentelemetry.io/collector/featuregate/stage.go
new file mode 100644
index 0000000000000..f2be1b248d372
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/stage.go
@@ -0,0 +1,44 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package featuregate // import "go.opentelemetry.io/collector/featuregate"
+
+// Stage represents the Gate's lifecycle and the expected state of it.
+type Stage int8
+
+const (
+	// StageAlpha is used when creating a new feature and the Gate must be explicitly enabled
+	// by the operator.
+	//
+	// The Gate will be disabled by default.
+	StageAlpha Stage = iota
+	// StageBeta is used when the feature gate is well tested and is enabled by default,
+	// but can be disabled by a Gate.
+	//
+	// The Gate will be enabled by default.
+	StageBeta
+	// StageStable is used when the feature is permanently enabled and can not be disabled by a Gate.
+	// This value is used to provide feedback to the user that the gate will be removed in the next versions.
+	//
+	// The Gate will be enabled by default and will return an error if disabled.
+ StageStable + // StageDeprecated is used when feature is permanently disabled and can not be enabled by a Gate. + // This value is used to provide feedback to the user that the gate will be removed in the next versions. + // + // The Gate will be disabled by default and will return an error if modified. + StageDeprecated +) + +func (s Stage) String() string { + switch s { + case StageAlpha: + return "Alpha" + case StageBeta: + return "Beta" + case StageStable: + return "Stable" + case StageDeprecated: + return "Deprecated" + } + return "Unknown" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go new file mode 100644 index 0000000000000..99e6ac83653ae --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +// MarshalSizer is the interface that groups the basic Marshal and Size methods +type MarshalSizer interface { + Marshaler + Sizer +} + +// Marshaler marshals pdata.Logs into bytes. +type Marshaler interface { + // MarshalLogs the given pdata.Logs into bytes. + // If the error is not nil, the returned bytes slice cannot be used. + MarshalLogs(ld Logs) ([]byte, error) +} + +// Unmarshaler unmarshalls bytes into pdata.Logs. +type Unmarshaler interface { + // UnmarshalLogs the given bytes into pdata.Logs. + // If the error is not nil, the returned pdata.Logs cannot be used. + UnmarshalLogs(buf []byte) (Logs, error) +} + +// Sizer is an optional interface implemented by the Marshaler, +// that calculates the size of a marshaled Logs. +type Sizer interface { + // LogsSize returns the size in bytes of a marshaled Logs. + LogsSize(ld Logs) int +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go new file mode 100644 index 0000000000000..a20f3027b0bb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go @@ -0,0 +1,148 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// LogRecord are experimental implementation of OpenTelemetry Log Data Model. + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLogRecord function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LogRecord struct { + orig *otlplogs.LogRecord +} + +func newLogRecord(orig *otlplogs.LogRecord) LogRecord { + return LogRecord{orig} +} + +// NewLogRecord creates a new empty LogRecord. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
+func NewLogRecord() LogRecord { + return newLogRecord(&otlplogs.LogRecord{}) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms LogRecord) MoveTo(dest LogRecord) { + *dest.orig = *ms.orig + *ms.orig = otlplogs.LogRecord{} +} + +// ObservedTimestamp returns the observedtimestamp associated with this LogRecord. +func (ms LogRecord) ObservedTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.ObservedTimeUnixNano) +} + +// SetObservedTimestamp replaces the observedtimestamp associated with this LogRecord. +func (ms LogRecord) SetObservedTimestamp(v pcommon.Timestamp) { + ms.orig.ObservedTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this LogRecord. +func (ms LogRecord) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this LogRecord. +func (ms LogRecord) SetTimestamp(v pcommon.Timestamp) { + ms.orig.TimeUnixNano = uint64(v) +} + +// TraceID returns the traceid associated with this LogRecord. +func (ms LogRecord) TraceID() pcommon.TraceID { + return pcommon.TraceID(ms.orig.TraceId) +} + +// SetTraceID replaces the traceid associated with this LogRecord. +func (ms LogRecord) SetTraceID(v pcommon.TraceID) { + ms.orig.TraceId = data.TraceID(v) +} + +// SpanID returns the spanid associated with this LogRecord. +func (ms LogRecord) SpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.SpanId) +} + +// SetSpanID replaces the spanid associated with this LogRecord. +func (ms LogRecord) SetSpanID(v pcommon.SpanID) { + ms.orig.SpanId = data.SpanID(v) +} + +// Flags returns the flags associated with this LogRecord. +func (ms LogRecord) Flags() LogRecordFlags { + return LogRecordFlags(ms.orig.Flags) +} + +// SetFlags replaces the flags associated with this LogRecord. +func (ms LogRecord) SetFlags(v LogRecordFlags) { + ms.orig.Flags = uint32(v) +} + +// SeverityText returns the severitytext associated with this LogRecord. +func (ms LogRecord) SeverityText() string { + return ms.orig.SeverityText +} + +// SetSeverityText replaces the severitytext associated with this LogRecord. +func (ms LogRecord) SetSeverityText(v string) { + ms.orig.SeverityText = v +} + +// SeverityNumber returns the severitynumber associated with this LogRecord. +func (ms LogRecord) SeverityNumber() SeverityNumber { + return SeverityNumber(ms.orig.SeverityNumber) +} + +// SetSeverityNumber replaces the severitynumber associated with this LogRecord. +func (ms LogRecord) SetSeverityNumber(v SeverityNumber) { + ms.orig.SeverityNumber = otlplogs.SeverityNumber(v) +} + +// Body returns the body associated with this LogRecord. +func (ms LogRecord) Body() pcommon.Value { + return pcommon.Value(internal.NewValue(&ms.orig.Body)) +} + +// Attributes returns the Attributes associated with this LogRecord. +func (ms LogRecord) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes)) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this LogRecord. +func (ms LogRecord) DroppedAttributesCount() uint32 { + return ms.orig.DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord. +func (ms LogRecord) SetDroppedAttributesCount(v uint32) { + ms.orig.DroppedAttributesCount = v +} + +// CopyTo copies all properties from the current struct overriding the destination. 
+func (ms LogRecord) CopyTo(dest LogRecord) { + dest.SetObservedTimestamp(ms.ObservedTimestamp()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) + dest.SetFlags(ms.Flags()) + dest.SetSeverityText(ms.SeverityText()) + dest.SetSeverityNumber(ms.SeverityNumber()) + ms.Body().CopyTo(dest.Body()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go new file mode 100644 index 0000000000000..6c564822d6eff --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "sort" + + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// LogRecordSlice logically represents a slice of LogRecord. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLogRecordSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LogRecordSlice struct { + orig *[]*otlplogs.LogRecord +} + +func newLogRecordSlice(orig *[]*otlplogs.LogRecord) LogRecordSlice { + return LogRecordSlice{orig} +} + +// NewLogRecordSlice creates a LogRecordSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLogRecordSlice() LogRecordSlice { + orig := []*otlplogs.LogRecord(nil) + return newLogRecordSlice(&orig) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLogRecordSlice()". +func (es LogRecordSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es LogRecordSlice) At(i int) LogRecord { + return newLogRecord((*es.orig)[i]) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LogRecordSlice can be initialized: +// +// es := NewLogRecordSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es LogRecordSlice) EnsureCapacity(newCap int) { + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlplogs.LogRecord, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty LogRecord. +// It returns the newly added LogRecord. +func (es LogRecordSlice) AppendEmpty() LogRecord { + *es.orig = append(*es.orig, &otlplogs.LogRecord{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. 
+func (es LogRecordSlice) MoveAndAppendTo(dest LogRecordSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) { + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es LogRecordSlice) CopyTo(dest LogRecordSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLogRecord((*es.orig)[i]).CopyTo(newLogRecord((*dest.orig)[i])) + } + return + } + origs := make([]otlplogs.LogRecord, srcLen) + wrappers := make([]*otlplogs.LogRecord, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLogRecord((*es.orig)[i]).CopyTo(newLogRecord(wrappers[i])) + } + *dest.orig = wrappers +} + +// Sort sorts the LogRecord elements within LogRecordSlice given the +// provided less function so that two instances of LogRecordSlice +// can be compared. +func (es LogRecordSlice) Sort(less func(a, b LogRecord) bool) { + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go new file mode 100644 index 0000000000000..870d53a63732b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceLogs is a collection of logs from a Resource. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceLogs function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceLogs struct { + orig *otlplogs.ResourceLogs +} + +func newResourceLogs(orig *otlplogs.ResourceLogs) ResourceLogs { + return ResourceLogs{orig} +} + +// NewResourceLogs creates a new empty ResourceLogs. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
+func NewResourceLogs() ResourceLogs { + return newResourceLogs(&otlplogs.ResourceLogs{}) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceLogs) MoveTo(dest ResourceLogs) { + *dest.orig = *ms.orig + *ms.orig = otlplogs.ResourceLogs{} +} + +// Resource returns the resource associated with this ResourceLogs. +func (ms ResourceLogs) Resource() pcommon.Resource { + return pcommon.Resource(internal.NewResource(&ms.orig.Resource)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceLogs. +func (ms ResourceLogs) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceLogs. +func (ms ResourceLogs) SetSchemaUrl(v string) { + ms.orig.SchemaUrl = v +} + +// ScopeLogs returns the ScopeLogs associated with this ResourceLogs. +func (ms ResourceLogs) ScopeLogs() ScopeLogsSlice { + return newScopeLogsSlice(&ms.orig.ScopeLogs) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceLogs) CopyTo(dest ResourceLogs) { + ms.Resource().CopyTo(dest.Resource()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.ScopeLogs().CopyTo(dest.ScopeLogs()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go new file mode 100644 index 0000000000000..19bd2e58ddc6e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "sort" + + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// ResourceLogsSlice logically represents a slice of ResourceLogs. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewResourceLogsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceLogsSlice struct { + orig *[]*otlplogs.ResourceLogs +} + +func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs) ResourceLogsSlice { + return ResourceLogsSlice{orig} +} + +// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewResourceLogsSlice() ResourceLogsSlice { + orig := []*otlplogs.ResourceLogs(nil) + return newResourceLogsSlice(&orig) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceLogsSlice()". +func (es ResourceLogsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceLogsSlice) At(i int) ResourceLogs { + return newResourceLogs((*es.orig)[i]) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. 
+// +// Here is how a new ResourceLogsSlice can be initialized: +// +// es := NewResourceLogsSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ResourceLogsSlice) EnsureCapacity(newCap int) { + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlplogs.ResourceLogs, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceLogs. +// It returns the newly added ResourceLogs. +func (es ResourceLogsSlice) AppendEmpty() ResourceLogs { + *es.orig = append(*es.orig, &otlplogs.ResourceLogs{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceLogsSlice) MoveAndAppendTo(dest ResourceLogsSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) { + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceLogs((*es.orig)[i]).CopyTo(newResourceLogs((*dest.orig)[i])) + } + return + } + origs := make([]otlplogs.ResourceLogs, srcLen) + wrappers := make([]*otlplogs.ResourceLogs, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceLogs((*es.orig)[i]).CopyTo(newResourceLogs(wrappers[i])) + } + *dest.orig = wrappers +} + +// Sort sorts the ResourceLogs elements within ResourceLogsSlice given the +// provided less function so that two instances of ResourceLogsSlice +// can be compared. +func (es ResourceLogsSlice) Sort(less func(a, b ResourceLogs) bool) { + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go new file mode 100644 index 0000000000000..c4d399103b2e2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ScopeLogs is a collection of logs from a LibraryInstrumentation. 
+// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewScopeLogs function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeLogs struct { + orig *otlplogs.ScopeLogs +} + +func newScopeLogs(orig *otlplogs.ScopeLogs) ScopeLogs { + return ScopeLogs{orig} +} + +// NewScopeLogs creates a new empty ScopeLogs. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewScopeLogs() ScopeLogs { + return newScopeLogs(&otlplogs.ScopeLogs{}) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ScopeLogs) MoveTo(dest ScopeLogs) { + *dest.orig = *ms.orig + *ms.orig = otlplogs.ScopeLogs{} +} + +// Scope returns the scope associated with this ScopeLogs. +func (ms ScopeLogs) Scope() pcommon.InstrumentationScope { + return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope)) +} + +// SchemaUrl returns the schemaurl associated with this ScopeLogs. +func (ms ScopeLogs) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ScopeLogs. +func (ms ScopeLogs) SetSchemaUrl(v string) { + ms.orig.SchemaUrl = v +} + +// LogRecords returns the LogRecords associated with this ScopeLogs. +func (ms ScopeLogs) LogRecords() LogRecordSlice { + return newLogRecordSlice(&ms.orig.LogRecords) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ScopeLogs) CopyTo(dest ScopeLogs) { + ms.Scope().CopyTo(dest.Scope()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.LogRecords().CopyTo(dest.LogRecords()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go new file mode 100644 index 0000000000000..be34e05f1350e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plog + +import ( + "sort" + + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// ScopeLogsSlice logically represents a slice of ScopeLogs. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewScopeLogsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeLogsSlice struct { + orig *[]*otlplogs.ScopeLogs +} + +func newScopeLogsSlice(orig *[]*otlplogs.ScopeLogs) ScopeLogsSlice { + return ScopeLogsSlice{orig} +} + +// NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewScopeLogsSlice() ScopeLogsSlice { + orig := []*otlplogs.ScopeLogs(nil) + return newScopeLogsSlice(&orig) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewScopeLogsSlice()". +func (es ScopeLogsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. 
+// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ScopeLogsSlice) At(i int) ScopeLogs { + return newScopeLogs((*es.orig)[i]) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ScopeLogsSlice can be initialized: +// +// es := NewScopeLogsSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ScopeLogsSlice) EnsureCapacity(newCap int) { + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlplogs.ScopeLogs, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ScopeLogs. +// It returns the newly added ScopeLogs. +func (es ScopeLogsSlice) AppendEmpty() ScopeLogs { + *es.orig = append(*es.orig, &otlplogs.ScopeLogs{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ScopeLogsSlice) MoveAndAppendTo(dest ScopeLogsSlice) { + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) { + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ScopeLogsSlice) CopyTo(dest ScopeLogsSlice) { + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newScopeLogs((*es.orig)[i]).CopyTo(newScopeLogs((*dest.orig)[i])) + } + return + } + origs := make([]otlplogs.ScopeLogs, srcLen) + wrappers := make([]*otlplogs.ScopeLogs, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newScopeLogs((*es.orig)[i]).CopyTo(newScopeLogs(wrappers[i])) + } + *dest.orig = wrappers +} + +// Sort sorts the ScopeLogs elements within ScopeLogsSlice given the +// provided less function so that two instances of ScopeLogsSlice +// can be compared. 
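+//
+// For example, to impose a deterministic order before comparing two slices
+// (the sort key is only illustrative):
+//
+//	sls.Sort(func(a, b ScopeLogs) bool { return a.SchemaUrl() < b.SchemaUrl() })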
+func (es ScopeLogsSlice) Sort(less func(a, b ScopeLogs) bool) { + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/json.go b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go new file mode 100644 index 0000000000000..1a877c68b30e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + "bytes" + "fmt" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +type JSONMarshaler struct{} + +func (*JSONMarshaler) MarshalLogs(ld Logs) ([]byte, error) { + buf := bytes.Buffer{} + pb := internal.LogsToProto(internal.Logs(ld)) + err := json.Marshal(&buf, &pb) + return buf.Bytes(), err +} + +var _ Unmarshaler = (*JSONUnmarshaler)(nil) + +type JSONUnmarshaler struct{} + +func (*JSONUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) { + iter := jsoniter.ConfigFastest.BorrowIterator(buf) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + ld := NewLogs() + ld.unmarshalJsoniter(iter) + if iter.Error != nil { + return Logs{}, iter.Error + } + otlp.MigrateLogs(ld.getOrig().ResourceLogs) + return ld, nil +} + +func (ms Logs) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource_logs", "resourceLogs": + iter.ReadArrayCB(func(iterator *jsoniter.Iterator) bool { + ms.ResourceLogs().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (ms ResourceLogs) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource": + json.ReadResource(iter, &ms.orig.Resource) + case "scope_logs", "scopeLogs": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.ScopeLogs().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (ms ScopeLogs) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "scope": + json.ReadScope(iter, &ms.orig.Scope) + case "log_records", "logRecords": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.LogRecords().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (ms LogRecord) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "timeUnixNano", "time_unix_nano": + ms.orig.TimeUnixNano = json.ReadUint64(iter) + case "observed_time_unix_nano", "observedTimeUnixNano": + ms.orig.ObservedTimeUnixNano = json.ReadUint64(iter) + case "severity_number", "severityNumber": + ms.orig.SeverityNumber = otlplogs.SeverityNumber(json.ReadEnumValue(iter, otlplogs.SeverityNumber_value)) + case "severity_text", "severityText": + ms.orig.SeverityText = iter.ReadString() + case "body": + json.ReadValue(iter, 
&ms.orig.Body) + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + ms.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "flags": + ms.orig.Flags = json.ReadUint32(iter) + case "traceId", "trace_id": + if err := ms.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readLog.traceId", fmt.Sprintf("parse trace_id:%v", err)) + } + case "spanId", "span_id": + if err := ms.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("readLog.spanId", fmt.Sprintf("parse span_id:%v", err)) + } + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go b/vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go new file mode 100644 index 0000000000000..4a866bcf64122 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/log_record_flags.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +const isSampledMask = uint32(1) + +var DefaultLogRecordFlags = LogRecordFlags(0) + +// LogRecordFlags defines flags for the LogRecord. The 8 least significant bits are the trace flags as +// defined in W3C Trace Context specification. 24 most significant bits are reserved and must be set to 0. +type LogRecordFlags uint32 + +// IsSampled returns true if the LogRecordFlags contains the IsSampled flag. +func (ms LogRecordFlags) IsSampled() bool { + return uint32(ms)&isSampledMask != 0 +} + +// WithIsSampled returns a new LogRecordFlags, with the IsSampled flag set to the given value. +func (ms LogRecordFlags) WithIsSampled(b bool) LogRecordFlags { + orig := uint32(ms) + if b { + orig |= isSampledMask + } else { + orig &^= isSampledMask + } + return LogRecordFlags(orig) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go new file mode 100644 index 0000000000000..f637077ddb498 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" +) + +// Logs is the top-level struct that is propagated through the logs pipeline. +// Use NewLogs to create new instance, zero-initialized instance is not valid for use. +type Logs internal.Logs + +func newLogs(orig *otlpcollectorlog.ExportLogsServiceRequest) Logs { + return Logs(internal.NewLogs(orig)) +} + +func (ms Logs) getOrig() *otlpcollectorlog.ExportLogsServiceRequest { + return internal.GetOrigLogs(internal.Logs(ms)) +} + +// NewLogs creates a new Logs struct. +func NewLogs() Logs { + return newLogs(&otlpcollectorlog.ExportLogsServiceRequest{}) +} + +// CopyTo copies the Logs instance overriding the destination. +func (ms Logs) CopyTo(dest Logs) { + ms.ResourceLogs().CopyTo(dest.ResourceLogs()) +} + +// LogRecordCount calculates the total number of log records. 
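+//
+// It walks every ResourceLogs and ScopeLogs entry, so the result reflects the
+// whole nested structure; a sketch:
+//
+//	ld := NewLogs()
+//	ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
+//	_ = ld.LogRecordCount() // 1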
+func (ms Logs) LogRecordCount() int { + logCount := 0 + rss := ms.ResourceLogs() + for i := 0; i < rss.Len(); i++ { + rs := rss.At(i) + ill := rs.ScopeLogs() + for i := 0; i < ill.Len(); i++ { + logs := ill.At(i) + logCount += logs.LogRecords().Len() + } + } + return logCount +} + +// ResourceLogs returns the ResourceLogsSlice associated with this Logs. +func (ms Logs) ResourceLogs() ResourceLogsSlice { + return newResourceLogsSlice(&ms.getOrig().ResourceLogs) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go new file mode 100644 index 0000000000000..bb102591bf2d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalLogs(ld Logs) ([]byte, error) { + pb := internal.LogsToProto(internal.Logs(ld)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) LogsSize(ld Logs) int { + pb := internal.LogsToProto(internal.Logs(ld)) + return pb.Size() +} + +var _ Unmarshaler = (*ProtoUnmarshaler)(nil) + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) { + pb := otlplogs.LogsData{} + err := pb.Unmarshal(buf) + return Logs(internal.LogsFromProto(pb)), err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go new file mode 100644 index 0000000000000..5ab21e088974f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/generated_exportpartialsuccess.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package plogotlp + +import ( + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" +) + +// ExportPartialSuccess represents the details of a partially successful export request. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExportPartialSuccess function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExportPartialSuccess struct { + orig *otlpcollectorlog.ExportLogsPartialSuccess +} + +func newExportPartialSuccess(orig *otlpcollectorlog.ExportLogsPartialSuccess) ExportPartialSuccess { + return ExportPartialSuccess{orig} +} + +// NewExportPartialSuccess creates a new empty ExportPartialSuccess. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
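+//
+// Server implementations typically populate the instance embedded in an
+// ExportResponse instead of creating one directly; a sketch (the message is
+// illustrative):
+//
+//	resp := NewExportResponse()
+//	resp.PartialSuccess().SetRejectedLogRecords(3)
+//	resp.PartialSuccess().SetErrorMessage("some records were rejected")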
+func NewExportPartialSuccess() ExportPartialSuccess { + return newExportPartialSuccess(&otlpcollectorlog.ExportLogsPartialSuccess{}) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { + *dest.orig = *ms.orig + *ms.orig = otlpcollectorlog.ExportLogsPartialSuccess{} +} + +// RejectedLogRecords returns the rejectedlogrecords associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) RejectedLogRecords() int64 { + return ms.orig.RejectedLogRecords +} + +// SetRejectedLogRecords replaces the rejectedlogrecords associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetRejectedLogRecords(v int64) { + ms.orig.RejectedLogRecords = v +} + +// ErrorMessage returns the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) ErrorMessage() string { + return ms.orig.ErrorMessage +} + +// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetErrorMessage(v string) { + ms.orig.ErrorMessage = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { + dest.SetRejectedLogRecords(ms.RejectedLogRecords()) + dest.SetErrorMessage(ms.ErrorMessage()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go new file mode 100644 index 0000000000000..4a7e66cb852d0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/grpc.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp" + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// GRPCClient is the client API for OTLP-GRPC Logs service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCClient interface { + // Export plog.Logs to the server. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) + + // unexported disallow implementation of the GRPCClient. + unexported() +} + +// NewGRPCClient returns a new GRPCClient connected using the given connection. +func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { + return &grpcClient{rawClient: otlpcollectorlog.NewLogsServiceClient(cc)} +} + +type grpcClient struct { + rawClient otlpcollectorlog.LogsServiceClient +} + +func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { + rsp, err := c.rawClient.Export(ctx, request.orig, opts...) + return ExportResponse{orig: rsp}, err +} + +func (c *grpcClient) unexported() {} + +// GRPCServer is the server API for OTLP gRPC LogsService service. +// Implementations MUST embed UnimplementedGRPCServer. +type GRPCServer interface { + // Export is called every time a new request is received. 
+ // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, ExportRequest) (ExportResponse, error) + + // unexported disallow implementation of the GRPCServer. + unexported() +} + +var _ GRPCServer = (*UnimplementedGRPCServer)(nil) + +// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. +type UnimplementedGRPCServer struct{} + +func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { + return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func (*UnimplementedGRPCServer) unexported() {} + +// RegisterGRPCServer registers the Server to the grpc.Server. +func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { + otlpcollectorlog.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv}) +} + +type rawLogsServer struct { + srv GRPCServer +} + +func (s rawLogsServer) Export(ctx context.Context, request *otlpcollectorlog.ExportLogsServiceRequest) (*otlpcollectorlog.ExportLogsServiceResponse, error) { + otlp.MigrateLogs(request.ResourceLogs) + rsp, err := s.srv.Export(ctx, ExportRequest{orig: request}) + return rsp.orig, err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go new file mode 100644 index 0000000000000..786d83b7c440e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/request.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp" + +import ( + "bytes" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" + "go.opentelemetry.io/collector/pdata/plog" +) + +var jsonUnmarshaler = &plog.JSONUnmarshaler{} + +// ExportRequest represents the request for gRPC/HTTP client/server. +// It's a wrapper for plog.Logs data. +type ExportRequest struct { + orig *otlpcollectorlog.ExportLogsServiceRequest +} + +// NewExportRequest returns an empty ExportRequest. +func NewExportRequest() ExportRequest { + return ExportRequest{orig: &otlpcollectorlog.ExportLogsServiceRequest{}} +} + +// NewExportRequestFromLogs returns a ExportRequest from plog.Logs. +// Because ExportRequest is a wrapper for plog.Logs, +// any changes to the provided Logs struct will be reflected in the ExportRequest and vice versa. +func NewExportRequestFromLogs(ld plog.Logs) ExportRequest { + return ExportRequest{orig: internal.GetOrigLogs(internal.Logs(ld))} +} + +// MarshalProto marshals ExportRequest into proto bytes. +func (ms ExportRequest) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportRequest from proto bytes. +func (ms ExportRequest) UnmarshalProto(data []byte) error { + if err := ms.orig.Unmarshal(data); err != nil { + return err + } + otlp.MigrateLogs(ms.orig.ResourceLogs) + return nil +} + +// MarshalJSON marshals ExportRequest into JSON bytes. +func (ms ExportRequest) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportRequest from JSON bytes. 
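+//
+// A round-trip sketch (error handling elided; ld is assumed to be an existing
+// plog.Logs value):
+//
+//	data, _ := NewExportRequestFromLogs(ld).MarshalJSON()
+//	req := NewExportRequest()
+//	_ = req.UnmarshalJSON(data)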
+func (ms ExportRequest) UnmarshalJSON(data []byte) error { + ld, err := jsonUnmarshaler.UnmarshalLogs(data) + if err != nil { + return err + } + *ms.orig = *internal.GetOrigLogs(internal.Logs(ld)) + return nil +} + +func (ms ExportRequest) Logs() plog.Logs { + return plog.Logs(internal.NewLogs(ms.orig)) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go new file mode 100644 index 0000000000000..439c2560ffa2f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/plogotlp/response.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp" + +import ( + "bytes" + + jsoniter "github.com/json-iterator/go" + + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" + "go.opentelemetry.io/collector/pdata/internal/json" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. +type ExportResponse struct { + orig *otlpcollectorlog.ExportLogsServiceResponse +} + +// NewExportResponse returns an empty ExportResponse. +func NewExportResponse() ExportResponse { + return ExportResponse{orig: &otlpcollectorlog.ExportLogsServiceResponse{}} +} + +// MarshalProto marshals ExportResponse into proto bytes. +func (ms ExportResponse) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportResponse from proto bytes. +func (ms ExportResponse) UnmarshalProto(data []byte) error { + return ms.orig.Unmarshal(data) +} + +// MarshalJSON marshals ExportResponse into JSON bytes. +func (ms ExportResponse) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportResponse from JSON bytes. +func (ms ExportResponse) UnmarshalJSON(data []byte) error { + iter := jsoniter.ConfigFastest.BorrowIterator(data) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + ms.unmarshalJsoniter(iter) + return iter.Error +} + +// PartialSuccess returns the ExportPartialSuccess associated with this ExportResponse. 
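+//
+// Clients typically inspect it after a successful Export to detect partially
+// rejected payloads; a sketch (client is assumed to be a GRPCClient):
+//
+//	resp, err := client.Export(ctx, req)
+//	if err == nil && resp.PartialSuccess().RejectedLogRecords() > 0 {
+//		// handle resp.PartialSuccess().ErrorMessage()
+//	}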
+func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { + return newExportPartialSuccess(&ms.orig.PartialSuccess) +} + +func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "partial_success", "partialSuccess": + ms.PartialSuccess().unmarshalJsoniter(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iterator *jsoniter.Iterator, f string) bool { + switch f { + case "rejected_log_records", "rejectedLogRecords": + ms.orig.RejectedLogRecords = json.ReadInt64(iter) + case "error_message", "errorMessage": + ms.orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go new file mode 100644 index 0000000000000..53a9d4179c442 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package plog // import "go.opentelemetry.io/collector/pdata/plog" + +import ( + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// SeverityNumber represents severity number of a log record. +type SeverityNumber int32 + +const ( + SeverityNumberUnspecified = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED) + SeverityNumberTrace = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE) + SeverityNumberTrace2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2) + SeverityNumberTrace3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3) + SeverityNumberTrace4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4) + SeverityNumberDebug = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG) + SeverityNumberDebug2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2) + SeverityNumberDebug3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3) + SeverityNumberDebug4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4) + SeverityNumberInfo = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO) + SeverityNumberInfo2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2) + SeverityNumberInfo3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3) + SeverityNumberInfo4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4) + SeverityNumberWarn = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN) + SeverityNumberWarn2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2) + SeverityNumberWarn3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3) + SeverityNumberWarn4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4) + SeverityNumberError = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR) + SeverityNumberError2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2) + SeverityNumberError3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3) + SeverityNumberError4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4) + SeverityNumberFatal = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL) + SeverityNumberFatal2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2) + SeverityNumberFatal3 = 
SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3) + SeverityNumberFatal4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4) +) + +// String returns the string representation of the SeverityNumber. +func (sn SeverityNumber) String() string { + switch sn { + case SeverityNumberUnspecified: + return "Unspecified" + case SeverityNumberTrace: + return "Trace" + case SeverityNumberTrace2: + return "Trace2" + case SeverityNumberTrace3: + return "Trace3" + case SeverityNumberTrace4: + return "Trace4" + case SeverityNumberDebug: + return "Debug" + case SeverityNumberDebug2: + return "Debug2" + case SeverityNumberDebug3: + return "Debug3" + case SeverityNumberDebug4: + return "Debug4" + case SeverityNumberInfo: + return "Info" + case SeverityNumberInfo2: + return "Info2" + case SeverityNumberInfo3: + return "Info3" + case SeverityNumberInfo4: + return "Info4" + case SeverityNumberWarn: + return "Warn" + case SeverityNumberWarn2: + return "Warn2" + case SeverityNumberWarn3: + return "Warn3" + case SeverityNumberWarn4: + return "Warn4" + case SeverityNumberError: + return "Error" + case SeverityNumberError2: + return "Error2" + case SeverityNumberError3: + return "Error3" + case SeverityNumberError4: + return "Error4" + case SeverityNumberFatal: + return "Fatal" + case SeverityNumberFatal2: + return "Fatal2" + case SeverityNumberFatal3: + return "Fatal3" + case SeverityNumberFatal4: + return "Fatal4" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index b2fbe07841ca3..f2f20e3b93008 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -21,19 +21,25 @@ import ( "github.com/felixge/httpsnoop" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" "go.opentelemetry.io/otel/trace" ) -// middleware is an http middleware which wraps the next handler in a span. -type middleware struct { +var _ http.Handler = &Handler{} + +// Handler is http middleware that corresponds to the http.Handler interface and +// is designed to wrap a http.Mux (or equivalent), while individual routes on +// the mux are wrapped with WithRouteTag. A Handler will add various attributes +// to the span using the attribute.Keys defined in this package. +type Handler struct { operation string server string + handler http.Handler tracer trace.Tracer meter metric.Meter @@ -53,17 +59,11 @@ func defaultHandlerFormatter(operation string, _ *http.Request) string { return operation } -// NewHandler wraps the passed handler in a span named after the operation and -// enriches it with metrics. +// NewHandler wraps the passed handler, functioning like middleware, in a span +// named after the operation and with any provided Options. func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { - return NewMiddleware(operation, opts...)(handler) -} - -// NewMiddleware returns a tracing and metrics instrumentation middleware. -// The handler returned by the middleware wraps a handler -// in a span named after the operation and enriches it with metrics. 
-func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { - h := middleware{ + h := Handler{ + handler: handler, operation: operation, } @@ -76,14 +76,10 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han h.configure(c) h.createMeasures() - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - h.serveHTTP(w, r, next) - }) - } + return &h } -func (h *middleware) configure(c *config) { +func (h *Handler) configure(c *config) { h.tracer = c.Tracer h.meter = c.Meter h.propagators = c.Propagators @@ -103,7 +99,7 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { +func (h *Handler) createMeasures() { h.counters = make(map[string]metric.Int64Counter) h.valueRecorders = make(map[string]metric.Float64Histogram) @@ -121,21 +117,20 @@ func (h *middleware) createMeasures() { h.valueRecorders[ServerLatency] = serverLatencyMeasure } -// serveHTTP sets up tracing and calls the given next http.Handler with the span -// context injected into the request context. -func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { +// ServeHTTP serves HTTP requests (http.Handler). +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { requestStartTime := time.Now() for _, f := range h.filters { if !f(r) { // Simply pass through to the handler if a filter rejects the request - next.ServeHTTP(w, r) + h.handler.ServeHTTP(w, r) return } } ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), + trace.WithAttributes(httpconv.ServerRequest(h.server, r)...), } if h.server != "" { hostAttr := semconv.NetHostName(h.server) @@ -214,12 +209,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http labeler := &Labeler{} ctx = injectLabeler(ctx, labeler) - next.ServeHTTP(w, r.WithContext(ctx)) + h.handler.ServeHTTP(w, r.WithContext(ctx)) setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) + attributes := append(labeler.Get(), httpconv.ServerRequest(h.server, r)...) if rww.statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } @@ -250,7 +245,7 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, if statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) } - span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) + span.SetStatus(httpconv.ServerStatus(statusCode)) if werr != nil && werr != io.EOF { attributes = append(attributes, WriteErrorKey.String(werr.Error())) @@ -258,18 +253,12 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, span.SetAttributes(attributes...) } -// WithRouteTag annotates spans and metrics with the provided route name -// with HTTP route attribute. +// WithRouteTag annotates a span with the provided route name using the +// RouteKey Tag. 
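+//
+// A typical pattern is to wrap each route's handler individually (mux and
+// usersHandler are illustrative):
+//
+//	mux.Handle("/users", WithRouteTag("/users", usersHandler))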
func WithRouteTag(route string, h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - attr := semconv.HTTPRouteKey.String(route) - span := trace.SpanFromContext(r.Context()) - span.SetAttributes(attr) - - labeler, _ := LabelerFromContext(r.Context()) - labeler.Add(attr) - + span.SetAttributes(semconv.HTTPRoute(route)) h.ServeHTTP(w, r) }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go deleted file mode 100644 index edf4ce3d315a3..0000000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -// Generate semconvutil package: -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go deleted file mode 100644 index d3dede9ebbda1..0000000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ /dev/null @@ -1,552 +0,0 @@ -// Code created by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconvutil/httpconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -import ( - "fmt" - "net/http" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -// HTTPClientResponse returns trace attributes for an HTTP response received by a -// client from a server. It will return the following attributes if the related -// values are defined in resp: "http.status.code", -// "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. For example: -// -// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) -func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { - return hc.ClientResponse(resp) -} - -// HTTPClientRequest returns trace attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.url", "http.flavor", -// "http.method", "net.peer.name". The following attributes are returned if the -// related values are defined in req: "net.peer.port", "http.user_agent", -// "http.request_content_length", "enduser.id". -func HTTPClientRequest(req *http.Request) []attribute.KeyValue { - return hc.ClientRequest(req) -} - -// HTTPClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func HTTPClientStatus(code int) (codes.Code, string) { - return hc.ClientStatus(code) -} - -// HTTPServerRequest returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "http.target", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port", -// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", -// "http.client_ip". -func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequest(server, req) -} - -// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. 
More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port". -func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequestMetrics(server, req) -} - -// HTTPServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func HTTPServerStatus(code int) (codes.Code, string) { - return hc.ServerStatus(code) -} - -// HTTPRequestHeader returns the contents of h as attributes. -// -// Instrumentation should require an explicit configuration of which headers to -// captured and then prune what they pass here. Including all headers can be a -// security risk - explicit configuration helps avoid leaking sensitive -// information. -// -// The User-Agent header is already captured in the http.user_agent attribute -// from ClientRequest and ServerRequest. Instrumentation may provide an option -// to capture that header here even though it is not recommended. Otherwise, -// instrumentation should filter that out of what is passed. -func HTTPRequestHeader(h http.Header) []attribute.KeyValue { - return hc.RequestHeader(h) -} - -// HTTPResponseHeader returns the contents of h as attributes. -// -// Instrumentation should require an explicit configuration of which headers to -// captured and then prune what they pass here. Including all headers can be a -// security risk - explicit configuration helps avoid leaking sensitive -// information. -// -// The User-Agent header is already captured in the http.user_agent attribute -// from ClientRequest and ServerRequest. Instrumentation may provide an option -// to capture that header here even though it is not recommended. Otherwise, -// instrumentation should filter that out of what is passed. -func HTTPResponseHeader(h http.Header) []attribute.KeyValue { - return hc.ResponseHeader(h) -} - -// httpConv are the HTTP semantic convention attributes defined for a version -// of the OpenTelemetry specification. 
-type httpConv struct { - NetConv *netConv - - EnduserIDKey attribute.Key - HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPResponseContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key -} - -var hc = &httpConv{ - NetConv: nc, - - EnduserIDKey: semconv.EnduserIDKey, - HTTPClientIPKey: semconv.HTTPClientIPKey, - HTTPFlavorKey: semconv.HTTPFlavorKey, - HTTPMethodKey: semconv.HTTPMethodKey, - HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, - HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, - HTTPRouteKey: semconv.HTTPRouteKey, - HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, - HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, - HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, - HTTPTargetKey: semconv.HTTPTargetKey, - HTTPURLKey: semconv.HTTPURLKey, - HTTPUserAgentKey: semconv.HTTPUserAgentKey, -} - -// ClientResponse returns attributes for an HTTP response received by a client -// from a server. The following attributes are returned if the related values -// are defined in resp: "http.status.code", "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. For example: -// -// append(ClientResponse(resp), ClientRequest(resp.Request)...) -func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { - var n int - if resp.StatusCode > 0 { - n++ - } - if resp.ContentLength > 0 { - n++ - } - - attrs := make([]attribute.KeyValue, 0, n) - if resp.StatusCode > 0 { - attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) - } - if resp.ContentLength > 0 { - attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) - } - return attrs -} - -// ClientRequest returns attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.url", "http.flavor", -// "http.method", "net.peer.name". The following attributes are returned if the -// related values are defined in req: "net.peer.port", "http.user_agent", -// "http.request_content_length", "enduser.id". -func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { - n := 3 // URL, peer name, proto, and method. - var h string - if req.URL != nil { - h = req.URL.Host - } - peer, p := firstHostPort(h, req.Header.Get("Host")) - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) - if port > 0 { - n++ - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - if req.ContentLength > 0 { - n++ - } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } - attrs := make([]attribute.KeyValue, 0, n) - - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.flavor(req.Proto)) - - var u string - if req.URL != nil { - // Remove any username/password info that may be in the URL. - userinfo := req.URL.User - req.URL.User = nil - u = req.URL.String() - // Restore any username/password info that was removed. 
- req.URL.User = userinfo - } - attrs = append(attrs, c.HTTPURLKey.String(u)) - - attrs = append(attrs, c.NetConv.PeerName(peer)) - if port > 0 { - attrs = append(attrs, c.NetConv.PeerPort(port)) - } - - if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) - } - - if l := req.ContentLength; l > 0 { - attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) - } - - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) - } - - return attrs -} - -// ServerRequest returns attributes for an HTTP request received by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "http.target", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port", -// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", -// "http.client_ip". -func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. - - n := 4 // Method, scheme, proto, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - peer, peerPort := splitHostPort(req.RemoteAddr) - if peer != "" { - n++ - if peerPort > 0 { - n++ - } - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP != "" { - n++ - } - attrs := make([]attribute.KeyValue, 0, n) - - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - - if peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. 
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) - if peerPort > 0 { - attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) - } - } - - if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) - } - - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) - } - - if clientIP != "" { - attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) - } - - return attrs -} - -// ServerRequestMetrics returns metric attributes for an HTTP request received -// by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port". -func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. - - n := 4 // Method, scheme, proto, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - attrs := make([]attribute.KeyValue, 0, n) - - attrs = append(attrs, c.methodMetric(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - - return attrs -} - -func (c *httpConv) method(method string) attribute.KeyValue { - if method == "" { - return c.HTTPMethodKey.String(http.MethodGet) - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return c.HTTPSchemeHTTPS - } - return c.HTTPSchemeHTTP -} - -func (c *httpConv) flavor(proto string) attribute.KeyValue { - switch proto { - case "HTTP/1.0": - return c.HTTPFlavorKey.String("1.0") - case "HTTP/1.1": - return c.HTTPFlavorKey.String("1.1") - case "HTTP/2": - return c.HTTPFlavorKey.String("2.0") - case "HTTP/3": - return c.HTTPFlavorKey.String("3.0") - default: - return c.HTTPFlavorKey.String(proto) - } -} - -func serverClientIP(xForwardedFor string) string { - if idx := strings.Index(xForwardedFor, ","); idx >= 0 { - xForwardedFor = xForwardedFor[:idx] - } - return xForwardedFor -} - -func requiredHTTPPort(https bool, port int) int { // nolint:revive - if https { - if port > 0 && port != 443 { - return port - } - } else { - if port > 0 && port != 80 { - return port - } - } - return -1 -} - -// Return the request host and port from the first non-empty source. -func firstHostPort(source ...string) (host string, port int) { - for _, hostport := range source { - host, port = splitHostPort(hostport) - if host != "" || port > 0 { - break - } - } - return -} - -// RequestHeader returns the contents of h as OpenTelemetry attributes. -func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { - return c.header("http.request.header", h) -} - -// ResponseHeader returns the contents of h as OpenTelemetry attributes. -func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { - return c.header("http.response.header", h) -} - -func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { - key := func(k string) attribute.Key { - k = strings.ToLower(k) - k = strings.ReplaceAll(k, "-", "_") - k = fmt.Sprintf("%s.%s", prefix, k) - return attribute.Key(k) - } - - attrs := make([]attribute.KeyValue, 0, len(h)) - for k, v := range h { - attrs = append(attrs, key(k).StringSlice(v)) - } - return attrs -} - -// ClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func (c *httpConv) ClientStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 400 { - return codes.Error, "" - } - return codes.Unset, "" -} - -// ServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. 
Status codes in the 400-499 range are not -// returned as errors. -func (c *httpConv) ServerStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 500 { - return codes.Error, "" - } - return codes.Unset, "" -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index e835cac12e4d9..9dda7e1a95718 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -20,10 +20,10 @@ import ( "net/http" "net/http/httptrace" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" "go.opentelemetry.io/otel/trace" ) @@ -109,8 +109,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + r = r.WithContext(ctx) + span.SetAttributes(httpconv.ClientRequest(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) @@ -121,8 +121,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(httpconv.ClientResponse(res)...) + span.SetStatus(httpconv.ClientStatus(res.StatusCode)) res.Body = newWrappedBody(span, res.Body) return res, err diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 8f3f53a9588e5..bbcbb74160777 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.44.0" + return "0.42.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go new file mode 100644 index 0000000000000..12d6b520f5289 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go @@ -0,0 +1,404 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+)
+
+// HTTPConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type HTTPConv struct {
+	NetConv *NetConv
+
+	EnduserIDKey                 attribute.Key
+	HTTPClientIPKey              attribute.Key
+	HTTPFlavorKey                attribute.Key
+	HTTPMethodKey                attribute.Key
+	HTTPRequestContentLengthKey  attribute.Key
+	HTTPResponseContentLengthKey attribute.Key
+	HTTPRouteKey                 attribute.Key
+	HTTPSchemeHTTP               attribute.KeyValue
+	HTTPSchemeHTTPS              attribute.KeyValue
+	HTTPStatusCodeKey            attribute.Key
+	HTTPTargetKey                attribute.Key
+	HTTPURLKey                   attribute.Key
+	HTTPUserAgentKey             attribute.Key
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status.code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+	var n int
+	if resp.StatusCode > 0 {
+		n++
+	}
+	if resp.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+	}
+	if resp.ContentLength > 0 {
+		attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+	}
+	return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+	n := 3 // URL, peer name, proto, and method.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	if req.ContentLength > 0 {
+		n++
+	}
+	userID, _, hasUserID := req.BasicAuth()
+	if hasUserID {
+		n++
+	}
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.proto(req.Proto))
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
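+		// Illustrative (hypothetical URL): "https://user:pass@example.com/api"
+		// would be recorded in http.url as "https://example.com/api".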
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, c.HTTPURLKey.String(u))
+
+	attrs = append(attrs, c.NetConv.PeerName(peer))
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+	}
+
+	if l := req.ContentLength; l > 0 {
+		attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+	}
+
+	if hasUserID {
+		attrs = append(attrs, c.EnduserIDKey.String(userID))
+	}
+
+	return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if their related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	// TODO: This currently does not add the specification required
+	// `http.target` attribute. It has too high of a cardinality to safely be
+	// added. An alternate should be added, or this comment removed, when it is
+	// addressed by the specification. If it is ultimately decided to continue
+	// not including the attribute, the HTTPTargetKey field of the HTTPConv
+	// should be removed as well.
+
+	n := 4 // Method, scheme, proto, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	peer, peerPort := splitHostPort(req.RemoteAddr)
+	if peer != "" {
+		n++
+		if peerPort > 0 {
+			n++
+		}
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	userID, _, hasUserID := req.BasicAuth()
+	if hasUserID {
+		n++
+	}
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		n++
+	}
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.scheme(req.TLS != nil))
+	attrs = append(attrs, c.proto(req.Proto))
+	attrs = append(attrs, c.NetConv.HostName(host))
+
+	if hostPort > 0 {
+		attrs = append(attrs, c.NetConv.HostPort(hostPort))
+	}
+
+	if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
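+		// Illustrative (hypothetical address): a RemoteAddr of "192.0.2.1:57144"
+		// yields net.sock.peer.addr "192.0.2.1" and net.sock.peer.port 57144.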
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) + if peerPort > 0 { + attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + if clientIP != "" { + attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) + } + + return attrs +} + +func (c *HTTPConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *HTTPConv) proto(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. +func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return stat, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. 
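+//
+// As an illustrative sketch (hypothetical inputs; c is an initialized
+// *HTTPConv):
+//
+//	code, msg := c.ServerStatus(404) // codes.Unset, ""
+//	code, msg = c.ServerStatus(500)  // codes.Error, ""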
+func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + + if code/100 == 4 { + return codes.Unset, "" + } + return stat, "" +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. +func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go similarity index 72% rename from vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go rename to vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go index bde8893437d70..4a711133a0269 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -1,7 +1,5 @@ -// Code created by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconvutil/netconv.go.tmpl - // Copyright The OpenTelemetry Authors +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -14,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" +package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" import ( "net" @@ -22,37 +20,11 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) -// NetTransport returns a trace attribute describing the transport protocol of the -// passed network. See the net.Dial for information about acceptable network -// values. 
-func NetTransport(network string) attribute.KeyValue { - return nc.Transport(network) -} - -// NetClient returns trace attributes for a client network connection to address. -// See net.Dial for information about acceptable address values, address should -// be the same as the one used to create conn. If conn is nil, only network -// peer attributes will be returned that describe address. Otherwise, the -// socket level information about conn will also be included. -func NetClient(address string, conn net.Conn) []attribute.KeyValue { - return nc.Client(address, conn) -} - -// NetServer returns trace attributes for a network listener listening at address. -// See net.Listen for information about acceptable address values, address -// should be the same as the one used to create ln. If ln is nil, only network -// host attributes will be returned that describe address. Otherwise, the -// socket level information about ln will also be included. -func NetServer(address string, ln net.Listener) []attribute.KeyValue { - return nc.Server(address, ln) -} - -// netConv are the network semantic convention attributes defined for a version +// NetConv are the network semantic convention attributes defined for a version // of the OpenTelemetry specification. -type netConv struct { +type NetConv struct { NetHostNameKey attribute.Key NetHostPortKey attribute.Key NetPeerNameKey attribute.Key @@ -68,23 +40,7 @@ type netConv struct { NetTransportInProc attribute.KeyValue } -var nc = &netConv{ - NetHostNameKey: semconv.NetHostNameKey, - NetHostPortKey: semconv.NetHostPortKey, - NetPeerNameKey: semconv.NetPeerNameKey, - NetPeerPortKey: semconv.NetPeerPortKey, - NetSockFamilyKey: semconv.NetSockFamilyKey, - NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, - NetSockPeerPortKey: semconv.NetSockPeerPortKey, - NetSockHostAddrKey: semconv.NetSockHostAddrKey, - NetSockHostPortKey: semconv.NetSockHostPortKey, - NetTransportOther: semconv.NetTransportOther, - NetTransportTCP: semconv.NetTransportTCP, - NetTransportUDP: semconv.NetTransportUDP, - NetTransportInProc: semconv.NetTransportInProc, -} - -func (c *netConv) Transport(network string) attribute.KeyValue { +func (c *NetConv) Transport(network string) attribute.KeyValue { switch network { case "tcp", "tcp4", "tcp6": return c.NetTransportTCP @@ -99,7 +55,7 @@ func (c *netConv) Transport(network string) attribute.KeyValue { } // Host returns attributes for a network host address. -func (c *netConv) Host(address string) []attribute.KeyValue { +func (c *NetConv) Host(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { @@ -126,7 +82,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { // be the same as the one used to create ln. If ln is nil, only network host // attributes will be returned that describe address. Otherwise, the socket // level information about ln will also be included. 
-func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { +func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue { if ln == nil { return c.Host(address) } @@ -167,11 +123,11 @@ func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { return attr } -func (c *netConv) HostName(name string) attribute.KeyValue { +func (c *NetConv) HostName(name string) attribute.KeyValue { return c.NetHostNameKey.String(name) } -func (c *netConv) HostPort(port int) attribute.KeyValue { +func (c *NetConv) HostPort(port int) attribute.KeyValue { return c.NetHostPortKey.Int(port) } @@ -180,7 +136,7 @@ func (c *netConv) HostPort(port int) attribute.KeyValue { // the same as the one used to create conn. If conn is nil, only network peer // attributes will be returned that describe address. Otherwise, the socket // level information about conn will also be included. -func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { +func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue { if conn == nil { return c.Peer(address) } @@ -290,7 +246,7 @@ func positiveInt(ints ...int) int { } // Peer returns attributes for a network peer address. -func (c *netConv) Peer(address string) []attribute.KeyValue { +func (c *NetConv) Peer(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { @@ -312,19 +268,19 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { return attrs } -func (c *netConv) PeerName(name string) attribute.KeyValue { +func (c *NetConv) PeerName(name string) attribute.KeyValue { return c.NetPeerNameKey.String(name) } -func (c *netConv) PeerPort(port int) attribute.KeyValue { +func (c *NetConv) PeerPort(port int) attribute.KeyValue { return c.NetPeerPortKey.Int(port) } -func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { +func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue { return c.NetSockPeerAddrKey.String(addr) } -func (c *netConv) SockPeerPort(port int) attribute.KeyValue { +func (c *NetConv) SockPeerPort(port int) attribute.KeyValue { return c.NetSockPeerPortKey.Int(port) } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go new file mode 100644 index 0000000000000..fc43808fe44d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httpconv provides OpenTelemetry HTTP semantic conventions for +// tracing telemetry. 
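+//
+// As a rough usage sketch (illustrative; span, req, and resp come from the
+// caller's own instrumentation, as in the otelhttp transport above):
+//
+//	span.SetAttributes(ClientRequest(req)...)
+//	// ... perform the round trip ...
+//	span.SetAttributes(ClientResponse(resp)...)
+//	span.SetStatus(ClientStatus(resp.StatusCode))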
+package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/semconv/internal/v2"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+var (
+	nc = &internal.NetConv{
+		NetHostNameKey:     semconv.NetHostNameKey,
+		NetHostPortKey:     semconv.NetHostPortKey,
+		NetPeerNameKey:     semconv.NetPeerNameKey,
+		NetPeerPortKey:     semconv.NetPeerPortKey,
+		NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+		NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+		NetTransportOther:  semconv.NetTransportOther,
+		NetTransportTCP:    semconv.NetTransportTCP,
+		NetTransportUDP:    semconv.NetTransportUDP,
+		NetTransportInProc: semconv.NetTransportInProc,
+	}
+
+	hc = &internal.HTTPConv{
+		NetConv: nc,
+
+		EnduserIDKey:                 semconv.EnduserIDKey,
+		HTTPClientIPKey:              semconv.HTTPClientIPKey,
+		HTTPFlavorKey:                semconv.HTTPFlavorKey,
+		HTTPMethodKey:                semconv.HTTPMethodKey,
+		HTTPRequestContentLengthKey:  semconv.HTTPRequestContentLengthKey,
+		HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+		HTTPRouteKey:                 semconv.HTTPRouteKey,
+		HTTPSchemeHTTP:               semconv.HTTPSchemeHTTP,
+		HTTPSchemeHTTPS:              semconv.HTTPSchemeHTTPS,
+		HTTPStatusCodeKey:            semconv.HTTPStatusCodeKey,
+		HTTPTargetKey:                semconv.HTTPTargetKey,
+		HTTPURLKey:                   semconv.HTTPURLKey,
+		HTTPUserAgentKey:             semconv.HTTPUserAgentKey,
+	}
+)
+
+// ClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status.code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func ClientResponse(resp *http.Response) []attribute.KeyValue {
+	return hc.ClientResponse(resp)
+}
+
+// ClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func ClientRequest(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequest(req)
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func ClientStatus(code int) (codes.Code, string) {
+	return hc.ClientStatus(code)
+}
+
+// ServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if their related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequest(server, req)
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func ServerStatus(code int) (codes.Code, string) {
+	return hc.ServerStatus(code)
+}
+
+// RequestHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func RequestHeader(h http.Header) []attribute.KeyValue {
+	return hc.RequestHeader(h)
+}
+
+// ResponseHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func ResponseHeader(h http.Header) []attribute.KeyValue {
+	return hc.ResponseHeader(h)
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go
index b48798f0ab548..f8166d25712d4 100644
--- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC
+// Copyright 2023 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -54,8 +54,7 @@ type RestoreTableRequest struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Required. The name of the instance in which to create the restored
-	// table. This instance must be in the same project as the source backup.
-	// Values are of the form `projects//instances/`.
+	// table. Values are of the form `projects//instances/`.
 	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
 	// Required. The id of the table to create and restore to. This
 	// table must not already exist.
The `table_id` appended to @@ -154,7 +153,8 @@ type RestoreTableMetadata struct { // The type of the restore source. SourceType RestoreSourceType `protobuf:"varint,2,opt,name=source_type,json=sourceType,proto3,enum=google.bigtable.admin.v2.RestoreSourceType" json:"source_type,omitempty"` // Information about the source used to restore the table, as specified by - // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. + // `source` in + // [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. // // Types that are assignable to SourceInfo: // @@ -170,7 +170,8 @@ type RestoreTableMetadata struct { // may not be created if the table is already optimized or the restore was // not successful. OptimizeTableOperationName string `protobuf:"bytes,4,opt,name=optimize_table_operation_name,json=optimizeTableOperationName,proto3" json:"optimize_table_operation_name,omitempty"` - // The progress of the [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + // The progress of the + // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] // operation. Progress *OperationProgress `protobuf:"bytes,5,opt,name=progress,proto3" json:"progress,omitempty"` } @@ -330,8 +331,8 @@ type CreateTableRequest struct { // Required. The unique name of the instance in which to create the table. // Values are of the form `projects/{project}/instances/{instance}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the + // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. // Maximum 50 characters. TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"` // Required. The Table to create. @@ -430,12 +431,12 @@ type CreateTableFromSnapshotRequest struct { // Required. The unique name of the instance in which to create the table. // Values are of the form `projects/{project}/instances/{instance}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the + // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"` - // Required. The unique name of the snapshot from which to restore the table. The - // snapshot and the table must be in the same instance. - // Values are of the form + // Required. The unique name of the snapshot from which to restore the table. + // The snapshot and the table must be in the same instance. Values are of the + // form // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. SourceSnapshot string `protobuf:"bytes,3,opt,name=source_snapshot,json=sourceSnapshot,proto3" json:"source_snapshot,omitempty"` } @@ -599,11 +600,11 @@ type ListTablesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the instance for which tables should be listed. - // Values are of the form `projects/{project}/instances/{instance}`. + // Required. 
The unique name of the instance for which tables should be + // listed. Values are of the form `projects/{project}/instances/{instance}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // The view to be applied to the returned tables' fields. - // Only NAME_ONLY view (default) and REPLICATION_VIEW are supported. + // NAME_ONLY view (default) and REPLICATION_VIEW are supported. View Table_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.bigtable.admin.v2.Table_View" json:"view,omitempty"` // Maximum number of results per page. // @@ -1124,10 +1125,10 @@ type ModifyColumnFamiliesRequest struct { // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). + // Required. Modifications to be atomically applied to the specified table's + // families. Entries are applied in order, meaning that earlier modifications + // can be masked by later ones (in the case of repeated updates to the same + // family, for example). Modifications []*ModifyColumnFamiliesRequest_Modification `protobuf:"bytes,2,rep,name=modifications,proto3" json:"modifications,omitempty"` } @@ -1184,8 +1185,8 @@ type GenerateConsistencyTokenRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the Table for which to create a consistency token. - // Values are of the form + // Required. The unique name of the Table for which to create a consistency + // token. Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1286,8 +1287,8 @@ type CheckConsistencyRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the Table for which to check replication consistency. - // Values are of the form + // Required. The unique name of the Table for which to check replication + // consistency. Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The token created using GenerateConsistencyToken for the Table. @@ -1411,9 +1412,9 @@ type SnapshotTableRequest struct { // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` - // Required. The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than + // Required. The ID by which the new snapshot should be referred to within the + // parent cluster, e.g., `mysnapshot` of the form: + // `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` rather than // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. 
SnapshotId string `protobuf:"bytes,3,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // The amount of time that the new snapshot can stay active after it is @@ -1561,8 +1562,8 @@ type ListSnapshotsRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the cluster for which snapshots should be listed. - // Values are of the form + // Required. The unique name of the cluster for which snapshots should be + // listed. Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -1895,7 +1896,8 @@ func (x *CreateTableFromSnapshotMetadata) GetFinishTime() *timestamppb.Timestamp return nil } -// The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. +// The request for +// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. type CreateBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2046,7 +2048,8 @@ func (x *CreateBackupMetadata) GetEndTime() *timestamppb.Timestamp { return nil } -// The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. +// The request for +// [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. type UpdateBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2055,6 +2058,7 @@ type UpdateBackupRequest struct { // Required. The backup to update. `backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. // Update is only supported for the following fields: + // // - `backup.expire_time`. Backup *Backup `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"` // Required. A mask specifying which fields (e.g. `expire_time`) in the @@ -2111,7 +2115,8 @@ func (x *UpdateBackupRequest) GetUpdateMask() *fieldmaskpb.FieldMask { return nil } -// The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. +// The request for +// [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. type GetBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2162,7 +2167,8 @@ func (x *GetBackupRequest) GetName() string { return "" } -// The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. +// The request for +// [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. type DeleteBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2213,7 +2219,8 @@ func (x *DeleteBackupRequest) GetName() string { return "" } -// The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The request for +// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. type ListBackupsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2232,13 +2239,14 @@ type ListBackupsRequest struct { // roughly synonymous with equality. Filter rules are case insensitive. 
// // The fields eligible for filtering are: - // - `name` - // - `source_table` - // - `state` - // - `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // - `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // - `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // - `size_bytes` + // + // * `name` + // * `source_table` + // * `state` + // * `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `size_bytes` // // To filter on multiple expressions, provide each separate expression within // parentheses. By default, each expression is an AND expression. However, @@ -2258,17 +2266,19 @@ type ListBackupsRequest struct { // - `size_bytes > 10000000000` --> The backup's size is greater than 10GB Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // An expression for specifying the sort order of the results of the request. - // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full - // syntax is described at https://aip.dev/132#ordering. + // The string value should specify one or more fields in + // [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at + // https://aip.dev/132#ordering. // // Fields supported are: - // - name - // - source_table - // - expire_time - // - start_time - // - end_time - // - size_bytes - // - state + // + // * name + // * source_table + // * expire_time + // * start_time + // * end_time + // * size_bytes + // * state // // For example, "start_time". The default sorting order is ascending. // To specify descending order for the field, a suffix " desc" should @@ -2282,9 +2292,10 @@ type ListBackupsRequest struct { // less, defaults to the server's maximum allowed page size. PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // If non-empty, `page_token` should contain a - // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] from a - // previous [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the same `parent` and with the same - // `filter`. + // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] + // from a previous + // [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the + // same `parent` and with the same `filter`. PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` } @@ -2355,7 +2366,8 @@ func (x *ListBackupsRequest) GetPageToken() string { return "" } -// The response for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The response for +// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. type ListBackupsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2364,8 +2376,8 @@ type ListBackupsResponse struct { // The list of matching backups. Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` // `next_page_token` can be sent in a subsequent - // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call to fetch more - // of the matching backups. + // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call + // to fetch more of the matching backups. 
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
 }
 
@@ -2415,6 +2427,172 @@ func (x *ListBackupsResponse) GetNextPageToken() string {
 	return ""
 }
 
+// The request for
+// [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+type CopyBackupRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The name of the destination cluster that will contain the backup
+	// copy. The cluster must already exist. Values are of the form:
+	// `projects/{project}/instances/{instance}/clusters/{cluster}`.
+	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+	// Required. The id of the new backup. The `backup_id` along with `parent`
+	// are combined as {parent}/backups/{backup_id} to create the full backup
+	// name, of the form:
+	// `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}`.
+	// This string must be between 1 and 50 characters in length and match the
+	// regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.
+	BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"`
+	// Required. The source backup to be copied from.
+	// The source backup needs to be in READY state for it to be copied.
+	// Copying a copied backup is not allowed.
+	// Once CopyBackup is in progress, the source backup cannot be deleted or
+	// cleaned up on expiration until CopyBackup is finished.
+	// Values are of the form:
+	// `projects//instances//clusters//backups/`.
+	SourceBackup string `protobuf:"bytes,3,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"`
+	// Required. The expiration time of the copied backup with
+	// microsecond granularity that must be at least 6 hours and at most 30 days
+	// from the time the request is received. Once the `expire_time` has
+	// passed, Cloud Bigtable will delete the backup and free the resources used
+	// by the backup.
+	ExpireTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+}
+
+func (x *CopyBackupRequest) Reset() {
+	*x = CopyBackupRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CopyBackupRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CopyBackupRequest) ProtoMessage() {}
+
+func (x *CopyBackupRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyBackupRequest.ProtoReflect.Descriptor instead.
+func (*CopyBackupRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{33} +} + +func (x *CopyBackupRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CopyBackupRequest) GetBackupId() string { + if x != nil { + return x.BackupId + } + return "" +} + +func (x *CopyBackupRequest) GetSourceBackup() string { + if x != nil { + return x.SourceBackup + } + return "" +} + +func (x *CopyBackupRequest) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +// Metadata type for the google.longrunning.Operation returned by +// [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. +type CopyBackupMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the backup being created through the copy operation. + // Values are of the form + // `projects//instances//clusters//backups/`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Information about the source backup that is being copied from. + SourceBackupInfo *BackupInfo `protobuf:"bytes,2,opt,name=source_backup_info,json=sourceBackupInfo,proto3" json:"source_backup_info,omitempty"` + // The progress of the + // [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup] + // operation. + Progress *OperationProgress `protobuf:"bytes,3,opt,name=progress,proto3" json:"progress,omitempty"` +} + +func (x *CopyBackupMetadata) Reset() { + *x = CopyBackupMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyBackupMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyBackupMetadata) ProtoMessage() {} + +func (x *CopyBackupMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyBackupMetadata.ProtoReflect.Descriptor instead. +func (*CopyBackupMetadata) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{34} +} + +func (x *CopyBackupMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CopyBackupMetadata) GetSourceBackupInfo() *BackupInfo { + if x != nil { + return x.SourceBackupInfo + } + return nil +} + +func (x *CopyBackupMetadata) GetProgress() *OperationProgress { + if x != nil { + return x.Progress + } + return nil +} + // An initial split point for a newly created table. 
type CreateTableRequest_Split struct { state protoimpl.MessageState @@ -2428,7 +2606,7 @@ type CreateTableRequest_Split struct { func (x *CreateTableRequest_Split) Reset() { *x = CreateTableRequest_Split{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2441,7 +2619,7 @@ func (x *CreateTableRequest_Split) String() string { func (*CreateTableRequest_Split) ProtoMessage() {} func (x *CreateTableRequest_Split) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2485,7 +2663,7 @@ type ModifyColumnFamiliesRequest_Modification struct { func (x *ModifyColumnFamiliesRequest_Modification) Reset() { *x = ModifyColumnFamiliesRequest_Modification{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2498,7 +2676,7 @@ func (x *ModifyColumnFamiliesRequest_Modification) String() string { func (*ModifyColumnFamiliesRequest_Modification) ProtoMessage() {} func (x *ModifyColumnFamiliesRequest_Modification) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2963,345 +3141,392 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xb2, 0x28, 0x0a, 0x12, 0x42, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8f, 0x02, 0x0a, 0x11, 0x43, + 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 
0x12, 0x20, 0x0a, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xef, 0x01, 0x0a, + 0x12, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x28, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x52, 0x0a, 0x12, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x47, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x32, 0xa2, + 0x2a, 0x0a, 0x12, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 
0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, + 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x42, 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0xca, 0x41, 0x28, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0xa4, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0xda, 0x41, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 
0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, - 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, - 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x38, 0x2e, 0x67, 0x6f, + 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xce, 0x01, 0x0a, 0x0b, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x22, 0x3d, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, + 0x32, 0x30, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xda, 0x41, 0x11, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x1c, + 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8e, 0x01, 0x0a, + 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc6, 0x01, + 0x0a, 0x0d, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x6e, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x66, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x75, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0xca, 0x41, 0x1e, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x15, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xcf, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, + 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, + 0x3f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, - 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0xca, 0x41, 0x28, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa4, 0x01, 0x0a, - 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 
0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, + 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, + 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, + 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x42, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x37, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, + 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x39, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 
0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xce, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0xda, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x32, 0x30, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x05, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0xda, 0x41, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x1c, 0x0a, 0x05, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x12, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x39, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 
0x69, + 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, + 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc6, 0x01, 0x0a, 0x0d, 0x55, 0x6e, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, - 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x75, - 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0xca, 0x41, 0x1e, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x15, 0x55, 0x6e, 0x64, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0xcf, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, - 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x2a, 0x7d, 0x3a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, + 0x24, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0xca, 0x41, 0x21, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x12, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, + 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, + 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xbb, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x49, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, + 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x12, 0xa2, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x37, 0x2f, 
0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x3a, 0x01, 0x2a, - 0x12, 0xe8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x2a, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xe0, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xda, 0x01, 0x0a, 0x10, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, - 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 
0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, - 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0xca, 0x41, 0x21, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x15, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, - 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0xbb, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 
0x69, - 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x49, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa2, - 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x3a, 0x2a, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, - 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0xe0, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, + 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, + 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, + 0x69, 0x64, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 
0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, - 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x17, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x12, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, + 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, + 0x62, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x32, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, + 0x12, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x12, 0x9c, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, - 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x38, 0x2a, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, + 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0xb3, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, + 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x62, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x47, 0x32, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, - 0x9c, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 
0x61, 0x63, 0x6b, 0x75, 0x70, - 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x2a, - 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, + 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0xca, 0x41, 0x1d, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xed, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x70, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, + 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x92, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x3a, 0x63, 0x6f, 0x70, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x2a, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0xca, 0x41, 0x1c, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x12, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0xa0, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 
0x76, + 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xf3, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0xa7, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb3, - 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 
0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, + 0x01, 0x22, 0x41, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, - 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, - 0x01, 0x2a, 0xca, 0x41, 0x1d, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa0, - 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 
0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0xf3, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa7, 0x01, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, 0x41, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, - 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x3a, 0x01, 0x2a, 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 
0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xde, - 0x02, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0xd2, 0x41, 0xbb, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, - 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0xde, 0x02, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xbb, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, - 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, - 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, - 0xdf, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x42, 0x17, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 
0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, - 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, + 0x6e, 0x6c, 0x79, 0x42, 0xdf, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x17, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, + 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 
0x69, 0x6e, 0x5c, 0x56, 0x32, + 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3316,7 +3541,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP() []by return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescData } -var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 37) var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interface{}{ (*RestoreTableRequest)(nil), // 0: google.bigtable.admin.v2.RestoreTableRequest (*RestoreTableMetadata)(nil), // 1: google.bigtable.admin.v2.RestoreTableMetadata @@ -3351,113 +3576,120 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interfa (*DeleteBackupRequest)(nil), // 30: google.bigtable.admin.v2.DeleteBackupRequest (*ListBackupsRequest)(nil), // 31: google.bigtable.admin.v2.ListBackupsRequest (*ListBackupsResponse)(nil), // 32: google.bigtable.admin.v2.ListBackupsResponse - (*CreateTableRequest_Split)(nil), // 33: google.bigtable.admin.v2.CreateTableRequest.Split - (*ModifyColumnFamiliesRequest_Modification)(nil), // 34: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - (RestoreSourceType)(0), // 35: google.bigtable.admin.v2.RestoreSourceType - (*BackupInfo)(nil), // 36: google.bigtable.admin.v2.BackupInfo - (*OperationProgress)(nil), // 37: google.bigtable.admin.v2.OperationProgress - (*Table)(nil), // 38: google.bigtable.admin.v2.Table - (Table_View)(0), // 39: google.bigtable.admin.v2.Table.View - (*fieldmaskpb.FieldMask)(nil), // 40: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 41: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 42: google.protobuf.Duration - (*Snapshot)(nil), // 43: google.bigtable.admin.v2.Snapshot - (*Backup)(nil), // 44: google.bigtable.admin.v2.Backup - (*ColumnFamily)(nil), // 45: google.bigtable.admin.v2.ColumnFamily - (*iampb.GetIamPolicyRequest)(nil), // 46: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 47: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 48: google.iam.v1.TestIamPermissionsRequest - (*longrunningpb.Operation)(nil), // 49: google.longrunning.Operation - (*emptypb.Empty)(nil), // 50: google.protobuf.Empty - (*iampb.Policy)(nil), // 51: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 52: google.iam.v1.TestIamPermissionsResponse + (*CopyBackupRequest)(nil), // 33: google.bigtable.admin.v2.CopyBackupRequest + (*CopyBackupMetadata)(nil), // 34: google.bigtable.admin.v2.CopyBackupMetadata + (*CreateTableRequest_Split)(nil), // 35: google.bigtable.admin.v2.CreateTableRequest.Split + (*ModifyColumnFamiliesRequest_Modification)(nil), // 36: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification + (RestoreSourceType)(0), // 37: google.bigtable.admin.v2.RestoreSourceType + (*BackupInfo)(nil), // 38: google.bigtable.admin.v2.BackupInfo + (*OperationProgress)(nil), // 39: google.bigtable.admin.v2.OperationProgress + (*Table)(nil), // 40: google.bigtable.admin.v2.Table + (Table_View)(0), // 41: google.bigtable.admin.v2.Table.View + (*fieldmaskpb.FieldMask)(nil), // 42: google.protobuf.FieldMask + 
(*timestamppb.Timestamp)(nil), // 43: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 44: google.protobuf.Duration + (*Snapshot)(nil), // 45: google.bigtable.admin.v2.Snapshot + (*Backup)(nil), // 46: google.bigtable.admin.v2.Backup + (*ColumnFamily)(nil), // 47: google.bigtable.admin.v2.ColumnFamily + (*iampb.GetIamPolicyRequest)(nil), // 48: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 49: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 50: google.iam.v1.TestIamPermissionsRequest + (*longrunningpb.Operation)(nil), // 51: google.longrunning.Operation + (*emptypb.Empty)(nil), // 52: google.protobuf.Empty + (*iampb.Policy)(nil), // 53: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 54: google.iam.v1.TestIamPermissionsResponse } var file_google_bigtable_admin_v2_bigtable_table_admin_proto_depIdxs = []int32{ - 35, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType - 36, // 1: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo - 37, // 2: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress - 37, // 3: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress - 38, // 4: google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table - 33, // 5: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split - 39, // 6: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 38, // 7: google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table - 39, // 8: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 38, // 9: google.bigtable.admin.v2.UpdateTableRequest.table:type_name -> google.bigtable.admin.v2.Table - 40, // 10: google.bigtable.admin.v2.UpdateTableRequest.update_mask:type_name -> google.protobuf.FieldMask - 41, // 11: google.bigtable.admin.v2.UpdateTableMetadata.start_time:type_name -> google.protobuf.Timestamp - 41, // 12: google.bigtable.admin.v2.UpdateTableMetadata.end_time:type_name -> google.protobuf.Timestamp - 41, // 13: google.bigtable.admin.v2.UndeleteTableMetadata.start_time:type_name -> google.protobuf.Timestamp - 41, // 14: google.bigtable.admin.v2.UndeleteTableMetadata.end_time:type_name -> google.protobuf.Timestamp - 34, // 15: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - 42, // 16: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration - 43, // 17: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot + 37, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType + 38, // 1: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo + 39, // 2: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 39, // 3: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 40, // 4: 
google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table + 35, // 5: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split + 41, // 6: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View + 40, // 7: google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table + 41, // 8: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View + 40, // 9: google.bigtable.admin.v2.UpdateTableRequest.table:type_name -> google.bigtable.admin.v2.Table + 42, // 10: google.bigtable.admin.v2.UpdateTableRequest.update_mask:type_name -> google.protobuf.FieldMask + 43, // 11: google.bigtable.admin.v2.UpdateTableMetadata.start_time:type_name -> google.protobuf.Timestamp + 43, // 12: google.bigtable.admin.v2.UpdateTableMetadata.end_time:type_name -> google.protobuf.Timestamp + 43, // 13: google.bigtable.admin.v2.UndeleteTableMetadata.start_time:type_name -> google.protobuf.Timestamp + 43, // 14: google.bigtable.admin.v2.UndeleteTableMetadata.end_time:type_name -> google.protobuf.Timestamp + 36, // 15: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification + 44, // 16: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration + 45, // 17: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot 19, // 18: google.bigtable.admin.v2.SnapshotTableMetadata.original_request:type_name -> google.bigtable.admin.v2.SnapshotTableRequest - 41, // 19: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp - 41, // 20: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp + 43, // 19: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp + 43, // 20: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp 4, // 21: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 41, // 22: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp - 41, // 23: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp - 44, // 24: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 41, // 25: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp - 41, // 26: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp - 44, // 27: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 40, // 28: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask - 44, // 29: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> google.bigtable.admin.v2.Backup - 45, // 30: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create:type_name -> google.bigtable.admin.v2.ColumnFamily - 45, // 31: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update:type_name -> google.bigtable.admin.v2.ColumnFamily - 3, // 32: 
google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest - 4, // 33: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 6, // 34: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest - 8, // 35: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest - 9, // 36: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:input_type -> google.bigtable.admin.v2.UpdateTableRequest - 11, // 37: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest - 12, // 38: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:input_type -> google.bigtable.admin.v2.UndeleteTableRequest - 14, // 39: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest - 5, // 40: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest - 15, // 41: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest - 17, // 42: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest - 19, // 43: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest - 20, // 44: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest - 21, // 45: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest - 23, // 46: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest - 26, // 47: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest - 29, // 48: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest - 28, // 49: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest - 30, // 50: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest - 31, // 51: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest - 0, // 52: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest - 46, // 53: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 47, // 54: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 48, // 55: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 38, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table - 49, // 57: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:output_type -> google.longrunning.Operation - 7, // 58: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse - 38, // 59: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> 
google.bigtable.admin.v2.Table - 49, // 60: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:output_type -> google.longrunning.Operation - 50, // 61: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:output_type -> google.protobuf.Empty - 49, // 62: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:output_type -> google.longrunning.Operation - 38, // 63: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table - 50, // 64: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:output_type -> google.protobuf.Empty - 16, // 65: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse - 18, // 66: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse - 49, // 67: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:output_type -> google.longrunning.Operation - 43, // 68: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot - 22, // 69: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse - 50, // 70: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:output_type -> google.protobuf.Empty - 49, // 71: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:output_type -> google.longrunning.Operation - 44, // 72: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup - 44, // 73: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup - 50, // 74: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:output_type -> google.protobuf.Empty - 32, // 75: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse - 49, // 76: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:output_type -> google.longrunning.Operation - 51, // 77: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy - 51, // 78: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy - 52, // 79: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 56, // [56:80] is the sub-list for method output_type - 32, // [32:56] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 32, // [32:32] is the sub-list for extension extendee - 0, // [0:32] is the sub-list for field type_name + 43, // 22: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp + 43, // 23: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp + 46, // 24: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 43, // 25: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp + 43, // 26: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp + 46, // 27: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 42, // 28: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask + 46, // 29: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> 
google.bigtable.admin.v2.Backup + 43, // 30: google.bigtable.admin.v2.CopyBackupRequest.expire_time:type_name -> google.protobuf.Timestamp + 38, // 31: google.bigtable.admin.v2.CopyBackupMetadata.source_backup_info:type_name -> google.bigtable.admin.v2.BackupInfo + 39, // 32: google.bigtable.admin.v2.CopyBackupMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 47, // 33: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create:type_name -> google.bigtable.admin.v2.ColumnFamily + 47, // 34: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update:type_name -> google.bigtable.admin.v2.ColumnFamily + 3, // 35: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest + 4, // 36: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest + 6, // 37: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest + 8, // 38: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest + 9, // 39: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:input_type -> google.bigtable.admin.v2.UpdateTableRequest + 11, // 40: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest + 12, // 41: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:input_type -> google.bigtable.admin.v2.UndeleteTableRequest + 14, // 42: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest + 5, // 43: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest + 15, // 44: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest + 17, // 45: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest + 19, // 46: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest + 20, // 47: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest + 21, // 48: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest + 23, // 49: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest + 26, // 50: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest + 29, // 51: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest + 28, // 52: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest + 30, // 53: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest + 31, // 54: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest + 0, // 55: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest + 33, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup:input_type -> google.bigtable.admin.v2.CopyBackupRequest + 48, // 57: 
google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 49, // 58: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 50, // 59: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 40, // 60: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table + 51, // 61: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:output_type -> google.longrunning.Operation + 7, // 62: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse + 40, // 63: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> google.bigtable.admin.v2.Table + 51, // 64: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:output_type -> google.longrunning.Operation + 52, // 65: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:output_type -> google.protobuf.Empty + 51, // 66: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:output_type -> google.longrunning.Operation + 40, // 67: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table + 52, // 68: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:output_type -> google.protobuf.Empty + 16, // 69: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse + 18, // 70: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse + 51, // 71: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:output_type -> google.longrunning.Operation + 45, // 72: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot + 22, // 73: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse + 52, // 74: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:output_type -> google.protobuf.Empty + 51, // 75: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:output_type -> google.longrunning.Operation + 46, // 76: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup + 46, // 77: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup + 52, // 78: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:output_type -> google.protobuf.Empty + 32, // 79: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse + 51, // 80: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:output_type -> google.longrunning.Operation + 51, // 81: google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup:output_type -> google.longrunning.Operation + 53, // 82: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy + 53, // 83: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy + 54, // 84: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 60, // [60:85] is the sub-list for method output_type + 35, // [35:60] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 35, // [35:35] is the sub-list for extension extendee + 0, // [0:35] 
is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() } @@ -3865,7 +4097,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableRequest_Split); i { + switch v := v.(*CopyBackupRequest); i { case 0: return &v.state case 1: @@ -3877,6 +4109,30 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyBackupMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTableRequest_Split); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ModifyColumnFamiliesRequest_Modification); i { case 0: return &v.state @@ -3899,7 +4155,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { (*DropRowRangeRequest_RowKeyPrefix)(nil), (*DropRowRangeRequest_DeleteAllDataFromTable)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[36].OneofWrappers = []interface{}{ (*ModifyColumnFamiliesRequest_Modification_Create)(nil), (*ModifyColumnFamiliesRequest_Modification_Update)(nil), (*ModifyColumnFamiliesRequest_Modification_Drop)(nil), @@ -3910,7 +4166,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc, NumEnums: 0, - NumMessages: 35, + NumMessages: 37, NumExtensions: 0, NumServices: 1, }, @@ -4016,8 +4272,8 @@ type BigtableTableAdminClient interface { // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the - // creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the + // returned operation will stop the creation and delete the backup. CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Gets metadata on a pending or completed Cloud Bigtable Backup. GetBackup(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) @@ -4028,8 +4284,7 @@ type BigtableTableAdminClient interface { // Lists Cloud Bigtable backups. Returns both completed and pending // backups. ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) - // Create a new table by restoring from a completed backup. The new table - // must be in the same project as the instance containing the backup. 
The + // Create a new table by restoring from a completed backup. The // returned table [long-running operation][google.longrunning.Operation] can // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is @@ -4037,6 +4292,9 @@ type BigtableTableAdminClient interface { // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. RestoreTable(ctx context.Context, in *RestoreTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) + // Copy a Cloud Bigtable backup to a new backup in the destination cluster + // located in the destination instance and project. + CopyBackup(ctx context.Context, in *CopyBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. @@ -4044,7 +4302,8 @@ type BigtableTableAdminClient interface { // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) - // Returns permissions that the caller has on the specified Table or Backup resource. + // Returns permissions that the caller has on the specified Table or Backup + // resource. TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) } @@ -4245,6 +4504,15 @@ func (c *bigtableTableAdminClient) RestoreTable(ctx context.Context, in *Restore return out, nil } +func (c *bigtableTableAdminClient) CopyBackup(ctx context.Context, in *CopyBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) + err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *bigtableTableAdminClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", in, out, opts...) @@ -4354,8 +4622,8 @@ type BigtableTableAdminServer interface { // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the - // creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the + // returned operation will stop the creation and delete the backup. CreateBackup(context.Context, *CreateBackupRequest) (*longrunningpb.Operation, error) // Gets metadata on a pending or completed Cloud Bigtable Backup. GetBackup(context.Context, *GetBackupRequest) (*Backup, error) @@ -4366,8 +4634,7 @@ type BigtableTableAdminServer interface { // Lists Cloud Bigtable backups. Returns both completed and pending // backups. ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) - // Create a new table by restoring from a completed backup. The new table - // must be in the same project as the instance containing the backup. 
The + // Create a new table by restoring from a completed backup. The // returned table [long-running operation][google.longrunning.Operation] can // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is @@ -4375,6 +4642,9 @@ type BigtableTableAdminServer interface { // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. RestoreTable(context.Context, *RestoreTableRequest) (*longrunningpb.Operation, error) + // Copy a Cloud Bigtable backup to a new backup in the destination cluster + // located in the destination instance and project. + CopyBackup(context.Context, *CopyBackupRequest) (*longrunningpb.Operation, error) // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. @@ -4382,7 +4652,8 @@ type BigtableTableAdminServer interface { // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) - // Returns permissions that the caller has on the specified Table or Backup resource. + // Returns permissions that the caller has on the specified Table or Backup + // resource. TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) } @@ -4453,6 +4724,9 @@ func (*UnimplementedBigtableTableAdminServer) ListBackups(context.Context, *List func (*UnimplementedBigtableTableAdminServer) RestoreTable(context.Context, *RestoreTableRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method RestoreTable not implemented") } +func (*UnimplementedBigtableTableAdminServer) CopyBackup(context.Context, *CopyBackupRequest) (*longrunningpb.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method CopyBackup not implemented") +} func (*UnimplementedBigtableTableAdminServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") } @@ -4845,6 +5119,24 @@ func _BigtableTableAdmin_RestoreTable_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _BigtableTableAdmin_CopyBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CopyBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).CopyBackup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).CopyBackup(ctx, req.(*CopyBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BigtableTableAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(iampb.GetIamPolicyRequest) if err := dec(in); err != nil { @@ -4987,6 +5279,10 @@ var _BigtableTableAdmin_serviceDesc = grpc.ServiceDesc{ MethodName: "RestoreTable", Handler: _BigtableTableAdmin_RestoreTable_Handler, }, + { + MethodName: "CopyBackup", + Handler: 
_BigtableTableAdmin_CopyBackup_Handler, + }, { MethodName: "GetIamPolicy", Handler: _BigtableTableAdmin_GetIamPolicy_Handler, diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go index 5c8f2ed534474..cc0d535847836 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go @@ -600,15 +600,15 @@ type Table struct { // Views: `REPLICATION_VIEW`, `ENCRYPTION_VIEW`, `FULL` ClusterStates map[string]*Table_ClusterState `protobuf:"bytes,2,rep,name=cluster_states,json=clusterStates,proto3" json:"cluster_states,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // The column families configured for this table, mapped by column family ID. - // Views: `SCHEMA_VIEW`, `FULL` + // Views: `SCHEMA_VIEW`, `STATS_VIEW`, `FULL` ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies,proto3" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this - // table. Timestamps not matching the granularity will be rejected. - // If unspecified at creation time, the value will be set to `MILLIS`. - // Views: `SCHEMA_VIEW`, `FULL`. + // Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored + // in this table. Timestamps not matching the granularity will be rejected. If + // unspecified at creation time, the value will be set to `MILLIS`. Views: + // `SCHEMA_VIEW`, `FULL`. Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,proto3,enum=google.bigtable.admin.v2.Table_TimestampGranularity" json:"granularity,omitempty"` - // Output only. If this table was restored from another data source (e.g. a backup), this - // field will be populated with information about the restore. + // Output only. If this table was restored from another data source (e.g. a + // backup), this field will be populated with information about the restore. RestoreInfo *RestoreInfo `protobuf:"bytes,6,opt,name=restore_info,json=restoreInfo,proto3" json:"restore_info,omitempty"` // If specified, enable the change stream on this table. // Otherwise, the change stream is disabled and the change stream is not @@ -616,9 +616,10 @@ type Table struct { ChangeStreamConfig *ChangeStreamConfig `protobuf:"bytes,8,opt,name=change_stream_config,json=changeStreamConfig,proto3" json:"change_stream_config,omitempty"` // Set to true to make the table protected against data loss. i.e. deleting // the following resources through Admin APIs are prohibited: - // - The table. - // - The column families in the table. - // - The instance containing the table. + // + // * The table. + // * The column families in the table. + // * The instance containing the table. // // Note one can still delete the data stored in the table through Data APIs. DeletionProtection bool `protobuf:"varint,9,opt,name=deletion_protection,json=deletionProtection,proto3" json:"deletion_protection,omitempty"` @@ -888,11 +889,12 @@ type EncryptionInfo struct { // Output only. The type of encryption used to protect this resource. 
EncryptionType EncryptionInfo_EncryptionType `protobuf:"varint,3,opt,name=encryption_type,json=encryptionType,proto3,enum=google.bigtable.admin.v2.EncryptionInfo_EncryptionType" json:"encryption_type,omitempty"` - // Output only. The status of encrypt/decrypt calls on underlying data for this resource. - // Regardless of status, the existing data is always encrypted at rest. + // Output only. The status of encrypt/decrypt calls on underlying data for + // this resource. Regardless of status, the existing data is always encrypted + // at rest. EncryptionStatus *status.Status `protobuf:"bytes,4,opt,name=encryption_status,json=encryptionStatus,proto3" json:"encryption_status,omitempty"` - // Output only. The version of the Cloud KMS key specified in the parent cluster that is - // in use for the data underlying this table. + // Output only. The version of the Cloud KMS key specified in the parent + // cluster that is in use for the data underlying this table. KmsKeyVersion string `protobuf:"bytes,2,opt,name=kms_key_version,json=kmsKeyVersion,proto3" json:"kms_key_version,omitempty"` } @@ -961,7 +963,7 @@ type Snapshot struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Output only. The unique name of the snapshot. + // The unique name of the snapshot. // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -974,13 +976,13 @@ type Snapshot struct { DataSizeBytes int64 `protobuf:"varint,3,opt,name=data_size_bytes,json=dataSizeBytes,proto3" json:"data_size_bytes,omitempty"` // Output only. The time when the snapshot is created. CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // Output only. The time when the snapshot will be deleted. The maximum amount - // of time a snapshot can stay active is 365 days. If 'ttl' is not specified, + // The time when the snapshot will be deleted. The maximum amount of time a + // snapshot can stay active is 365 days. If 'ttl' is not specified, // the default maximum of 365 days will be used. DeleteTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Output only. The current state of the snapshot. State Snapshot_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.bigtable.admin.v2.Snapshot_State" json:"state,omitempty"` - // Output only. Description of the snapshot. + // Description of the snapshot. Description string `protobuf:"bytes,7,opt,name=description,proto3" json:"description,omitempty"` } @@ -1084,10 +1086,14 @@ type Backup struct { // name of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. Immutable. Name of the table from which this backup was created. This needs - // to be in the same instance as the backup. Values are of the form + // Required. Immutable. Name of the table from which this backup was created. + // This needs to be in the same instance as the backup. Values are of the form // `projects/{project}/instances/{instance}/tables/{source_table}`. SourceTable string `protobuf:"bytes,2,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"` + // Output only. Name of the backup from which this backup was copied. If a + // backup is not created by copying a backup, this field will be empty. 
Values
+	// are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+	SourceBackup string `protobuf:"bytes,10,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"`
 	// Required. The expiration time of the backup, with microseconds
 	// granularity that must be at least 6 hours and at most 90 days
 	// from the time the request is received. Once the `expire_time`
@@ -1096,8 +1102,9 @@ type Backup struct {
 	ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
 	// Output only. `start_time` is the time that the backup was started
 	// (i.e. approximately the time the
-	// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] request is received). The
-	// row data in this backup will be no older than this timestamp.
+	// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]
+	// request is received). The row data in this backup will be no older than
+	// this timestamp.
 	StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
 	// Output only. `end_time` is the time that the backup was finished. The row
 	// data in the backup will be no newer than this timestamp.
@@ -1156,6 +1163,13 @@ func (x *Backup) GetSourceTable() string {
 	return ""
 }
 
+func (x *Backup) GetSourceBackup() string {
+	if x != nil {
+		return x.SourceBackup
+	}
+	return ""
+}
+
 func (x *Backup) GetExpireTime() *timestamppb.Timestamp {
 	if x != nil {
 		return x.ExpireTime
@@ -1214,6 +1228,10 @@ type BackupInfo struct {
 	EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
 	// Output only. Name of the table the backup was created from.
 	SourceTable string `protobuf:"bytes,4,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"`
+	// Output only. Name of the backup from which this backup was copied. If a
+	// backup is not created by copying a backup, this field will be empty. Values
+	// are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+	SourceBackup string `protobuf:"bytes,10,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"`
 }
 
 func (x *BackupInfo) Reset() {
@@ -1276,6 +1294,13 @@ func (x *BackupInfo) GetSourceTable() string {
 	return ""
 }
 
+func (x *BackupInfo) GetSourceBackup() string {
+	if x != nil {
+		return x.SourceBackup
+	}
+	return ""
+}
+
 // The state of a table's data in a particular cluster.
type Table_ClusterState struct { state protoimpl.MessageState @@ -1612,122 +1637,128 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, - 0x02, 0x22, 0x9a, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x12, + 0x02, 0x22, 0xae, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, + 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0d, 0x64, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3b, - 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x64, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x05, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, - 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, - 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, - 0x02, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x4f, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, - 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x7d, 0x22, 0xf4, - 0x04, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, - 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x0f, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x53, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, - 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, - 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, - 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, + 0x63, 
0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, - 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x75, - 0xea, 0x41, 0x72, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, + 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, + 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x61, 
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x7d, 0x22, 0x9e, 0x05, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, + 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x75, 0xea, 0x41, + 0x72, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x7d, 0x22, 0xf7, 0x01, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, + 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, - 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2a, 0x44, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, - 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, 0xfc, 0x02, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0a, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 
0x61, 0x62, - 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, - 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, - 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, - 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2a, 0x44, 0x0a, + 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, + 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, + 0x50, 0x10, 0x01, 0x42, 0xfc, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 
+ 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go index f0e8acfb13af5..079d16a96f46e 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go @@ -188,9 +188,13 @@ type ReadRowsRequest struct { // // Return rows in lexiographical descending order of the row keys. The row // contents will not be affected by this flag. - // Example result set: [ - // {key: "k2", "f:col1": "v1", "f:col2": "v1"}, {key: "k1", "f:col1": "v2", - // "f:col2": "v2"} ]. + // + // Example result set: + // + // [ + // {key: "k2", "f:col1": "v1", "f:col2": "v1"}, + // {key: "k1", "f:col1": "v2", "f:col2": "v2"} + // ] Reversed bool `protobuf:"varint,7,opt,name=reversed,proto3" json:"reversed,omitempty"` } diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go index 921c52bcfa58a..5e7d778689411 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go @@ -35,13 +35,13 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Feature flags supported by a client. +// Feature flags supported or enabled by a client. // This is intended to be sent as part of request metadata to assure the server // that certain behaviors are safe to enable. This proto is meant to be // serialized and websafe-base64 encoded under the `bigtable-features` metadata // key. The value will remain constant for the lifetime of a client and due to // HTTP2's HPACK compression, the request overhead will be tiny. 
-// This is an internal implementation detail and should not be used by endusers +// This is an internal implementation detail and should not be used by end users // directly. type FeatureFlags struct { state protoimpl.MessageState @@ -52,8 +52,16 @@ type FeatureFlags struct { // reject ReadRowsRequests with the reverse bit set when this is absent. ReverseScans bool `protobuf:"varint,1,opt,name=reverse_scans,json=reverseScans,proto3" json:"reverse_scans,omitempty"` // Notify the server that the client enables batch write flow control by - // requesting RateLimitInfo from MutateRowsResponse. + // requesting RateLimitInfo from MutateRowsResponse. Due to technical reasons, + // this disables partial retries. MutateRowsRateLimit bool `protobuf:"varint,3,opt,name=mutate_rows_rate_limit,json=mutateRowsRateLimit,proto3" json:"mutate_rows_rate_limit,omitempty"` + // Notify the server that the client enables batch write flow control by + // requesting RateLimitInfo from MutateRowsResponse. With partial retries + // enabled. + MutateRowsRateLimit2 bool `protobuf:"varint,5,opt,name=mutate_rows_rate_limit2,json=mutateRowsRateLimit2,proto3" json:"mutate_rows_rate_limit2,omitempty"` + // Notify the server that the client supports the last_scanned_row field + // in ReadRowsResponse for long-running scans. + LastScannedRowResponses bool `protobuf:"varint,4,opt,name=last_scanned_row_responses,json=lastScannedRowResponses,proto3" json:"last_scanned_row_responses,omitempty"` } func (x *FeatureFlags) Reset() { @@ -102,32 +110,54 @@ func (x *FeatureFlags) GetMutateRowsRateLimit() bool { return false } +func (x *FeatureFlags) GetMutateRowsRateLimit2() bool { + if x != nil { + return x.MutateRowsRateLimit2 + } + return false +} + +func (x *FeatureFlags) GetLastScannedRowResponses() bool { + if x != nil { + return x.LastScannedRowResponses + } + return false +} + var File_google_bigtable_v2_feature_flags_proto protoreflect.FileDescriptor var file_google_bigtable_v2_feature_flags_proto_rawDesc = []byte{ 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x22, 0x68, 0x0a, 0x0c, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x53, 0x63, 0x61, 0x6e, - 0x73, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, - 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x13, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0xbd, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, - 0x32, 0x42, 0x11, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 
0x73, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x22, 0xdc, 0x01, 0x0a, + 0x0c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x53, 0x63, 0x61, + 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x77, + 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x13, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x6f, 0x77, 0x73, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x32, 0x12, 0x3b, + 0x0a, 0x1a, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x72, + 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, + 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x42, 0xbd, 0x01, 0x0a, 0x16, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x11, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, + 0x6c, 0x61, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 0e6ae69a58461..1bc92248cb470 100644 --- a/vendor/google.golang.org/grpc/README.md 
+++ b/vendor/google.golang.org/grpc/README.md @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. 
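To make that last bullet concrete, here is a minimal sketch of the server-side keepalive settings involved (the durations are illustrative placeholders, not recommendations):

```go
package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		// Recycle connections periodically, e.g. to re-trigger DNS lookups.
		MaxConnectionAge: 5 * time.Minute,
		// Grace period for in-flight RPCs to finish before the connection is
		// forcibly closed; raise this if long-running RPCs are being cut off.
		MaxConnectionAgeGrace: time.Minute,
	}))
}
```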
It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3efca45914937..712fef4d0fb9d 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -34,26 +34,26 @@ import ( // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. 
func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -112,19 +111,31 @@ func (a *Attributes) String() string { sb.WriteString("{") first := true for k, v := range a.m { - var key, val string - if str, ok := k.(interface{ String() string }); ok { - key = str.String() - } - if str, ok := v.(interface{ String() string }); ok { - val = str.String() - } if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) first = false } sb.WriteString("}") return sb.String() } + +func str(x any) string { + if v, ok := x.(fmt.Stringer); ok { + return v.String() + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go b/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go index c4ba21fa46828..f9bfc2d7d61ed 100644 --- a/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go +++ b/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go @@ -56,7 +56,7 @@ type logger struct { // Log marshals the audit.Event to json and prints it to standard output. func (l *logger) Log(event *audit.Event) { - jsonContainer := map[string]interface{}{ + jsonContainer := map[string]any{ "grpc_audit_log": convertEvent(event), } jsonBytes, err := json.Marshal(jsonContainer) diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c0e24c..b6377f445ad24 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -105,8 +105,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +115,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. 
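The `Shutdown` method above, combined with the `StateListener` option introduced in the next hunk, replaces the deprecated `RemoveSubConn`/`UpdateSubConnState` flow. A rough sketch of how a third-party balancer might adopt the new surface (the helper name is illustrative, not part of the gRPC API):

```go
package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// newWatchedSubConn creates a SubConn whose state changes are delivered to
// onState via the per-SubConn StateListener, instead of the deprecated
// Balancer.UpdateSubConnState method.
func newWatchedSubConn(cc balancer.ClientConn, addr resolver.Address, onState func(balancer.SubConn, balancer.SubConnState)) (balancer.SubConn, error) {
	// Declare sc first so the listener closure can capture it; per the doc
	// comment above, the listener is never invoked before Connect() is
	// called, so the capture is safe.
	var sc balancer.SubConn
	sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
		StateListener: func(scs balancer.SubConnState) { onState(sc, scs) },
	})
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}
```

Retiring the connection then becomes `sc.Shutdown()` rather than `cc.RemoveSubConn(sc)`, with one final Shutdown state update delivered to the listener.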
@@ -129,6 +136,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +162,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +270,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -343,9 +363,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +414,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. 
configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d31e1c..a7f1eeec8e6ae 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. 
func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index f070878bd9936..f3545302899f4 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 6d698229a342a..f2ddfc3788ed9 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -213,7 +213,7 @@ type lbBalancer struct { backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State - subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. + subConns map[resolver.Address]balancer.SubConn // Used to new/shutdown SubConn. scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. picker balancer.Picker // Support fallback to resolved backend addresses if there's no response @@ -290,7 +290,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // aggregateSubConnStats calculate the aggregated state of SubConns in // lb.SubConns. These SubConns are subconns in use (when switching between // fallback and grpclb). lb.scState contains states for all SubConns, including -// those in cache (SubConns are cached for 10 seconds after remove). +// those in cache (SubConns are cached for 10 seconds after shutdown). // // The aggregated state is: // - If at least one SubConn in Ready, the aggregated state is Ready; @@ -319,7 +319,13 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { return connectivity.TransientFailure } +// UpdateSubConnState is unused; NewSubConn's options always specifies +// updateSubConnState as the listener. func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + logger.Errorf("grpclb: UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) +} + +func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState if logger.V(2) { logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) @@ -339,8 +345,8 @@ func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubCo case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) case connectivity.TransientFailure: lb.connErr = scs.ConnectionError @@ -373,8 +379,13 @@ func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop if forceRegeneratePicker || (lb.state != oldAggrState) { lb.regeneratePicker(resetDrop) } + var cc balancer.ClientConn = lb.cc + if lb.usePickFirst { + // Bypass the caching layer that would wrap the picker. 
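+		// (Pick-first SubConns are created on the underlying ClientConn, so
+		// they are not wrapped in lbCacheSubConn; the picker wrapper added in
+		// lbCacheClientConn.UpdateState below unwraps lbCacheSubConn on every
+		// Pick, and must therefore be skipped for them.)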
+ cc = lb.cc.ClientConn + } - lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) + cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) } // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use @@ -448,17 +459,9 @@ func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) - addrs := ccs.ResolverState.Addresses + backendAddrs := ccs.ResolverState.Addresses - var remoteBalancerAddrs, backendAddrs []resolver.Address - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - a.Type = resolver.Backend - remoteBalancerAddrs = append(remoteBalancerAddrs, a) - } else { - backendAddrs = append(backendAddrs, a) - } - } + var remoteBalancerAddrs []resolver.Address if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { // Override any balancer addresses provided via // ccs.ResolverState.Addresses. diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index e56006d7131a8..edb66a90a3b1b 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -113,7 +113,6 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } balancingPolicyChanged := lb.usePickFirst != pickFirst - oldUsePickFirst := lb.usePickFirst lb.usePickFirst = pickFirst if fallbackModeChanged || balancingPolicyChanged { @@ -123,13 +122,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // For fallback mode switching with pickfirst, we want to recreate the // SubConn because the creds could be different. for a, sc := range lb.subConns { - if oldUsePickFirst { - // If old SubConn were created for pickfirst, bypass cache and - // remove directly. - lb.cc.cc.RemoveSubConn(sc) - } else { - lb.cc.RemoveSubConn(sc) - } + sc.Shutdown() delete(lb.subConns, a) } } @@ -144,16 +137,17 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } if sc != nil { if len(backendAddrs) == 0 { - lb.cc.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, scKey) return } - lb.cc.cc.UpdateAddresses(sc, backendAddrs) + lb.cc.ClientConn.UpdateAddresses(sc, backendAddrs) sc.Connect() return } + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } // This bypasses the cc wrapper with SubConn cache. - sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts) if err != nil { logger.Warningf("grpclb: failed to create new SubConn: %v", err) return @@ -176,6 +170,8 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback if _, ok := lb.subConns[addrWithoutAttrs]; !ok { // Use addrWithMD to create the SubConn. + var sc balancer.SubConn + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { logger.Warningf("grpclb: failed to create new SubConn: %v", err) @@ -194,7 +190,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback for a, sc := range lb.subConns { // a was removed by resolver. 
if _, ok := addrsSet[a]; !ok { - lb.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -419,7 +415,7 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { } } // Trigger a re-resolve when the stream errors. - ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) + ccw.lb.cc.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = false diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go index 373f04b98d37c..680779f1c82eb 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -91,11 +91,12 @@ func (r *lbManualResolver) UpdateState(s resolver.State) { const subConnCacheTime = time.Second * 10 // lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. -// SubConns will be kept in cache for subConnCacheTime before being removed. +// SubConns will be kept in cache for subConnCacheTime before being shut down. // -// Its new and remove methods are updated to do cache first. +// Its NewSubconn and SubConn.Shutdown methods are updated to do cache first. type lbCacheClientConn struct { - cc balancer.ClientConn + balancer.ClientConn + timeout time.Duration mu sync.Mutex @@ -113,7 +114,7 @@ type subConnCacheEntry struct { func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { return &lbCacheClientConn{ - cc: cc, + ClientConn: cc, timeout: subConnCacheTime, subConnCache: make(map[resolver.Address]*subConnCacheEntry), subConnToAddr: make(map[balancer.SubConn]resolver.Address), @@ -137,16 +138,27 @@ func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer return entry.sc, nil } - scNew, err := ccc.cc.NewSubConn(addrs, opts) + scNew, err := ccc.ClientConn.NewSubConn(addrs, opts) if err != nil { return nil, err } + scNew = &lbCacheSubConn{SubConn: scNew, ccc: ccc} ccc.subConnToAddr[scNew] = addrWithoutAttrs return scNew, nil } func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +type lbCacheSubConn struct { + balancer.SubConn + ccc *lbCacheClientConn +} + +func (sc *lbCacheSubConn) Shutdown() { + ccc := sc.ccc ccc.mu.Lock() defer ccc.mu.Unlock() addr, ok := ccc.subConnToAddr[sc] @@ -156,11 +168,11 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { if entry, ok := ccc.subConnCache[addr]; ok { if entry.sc != sc { - // This could happen if NewSubConn was called multiple times for the - // same address, and those SubConns are all removed. We remove sc - // immediately here. + // This could happen if NewSubConn was called multiple times for + // the same address, and those SubConns are all shut down. We + // remove sc immediately here. 
delete(ccc.subConnToAddr, sc) - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() } return } @@ -176,7 +188,7 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { if entry.abortDeleting { return } - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() delete(ccc.subConnToAddr, sc) delete(ccc.subConnCache, addr) }) @@ -195,14 +207,28 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { } func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { - ccc.cc.UpdateState(s) + s.Picker = &lbCachePicker{Picker: s.Picker} + ccc.ClientConn.UpdateState(s) } func (ccc *lbCacheClientConn) close() { ccc.mu.Lock() - // Only cancel all existing timers. There's no need to remove SubConns. + defer ccc.mu.Unlock() + // Only cancel all existing timers. There's no need to shut down SubConns. for _, entry := range ccc.subConnCache { entry.cancel() } - ccc.mu.Unlock() +} + +type lbCachePicker struct { + balancer.Picker +} + +func (cp *lbCachePicker) Pick(i balancer.PickInfo) (balancer.PickResult, error) { + res, err := cp.Picker.Pick(i) + if err != nil { + return res, err + } + res.SubConn = res.SubConn.(*lbCacheSubConn).SubConn + return res, nil } diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go new file mode 100644 index 0000000000000..3289f2869f88f --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -0,0 +1,181 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package leastrequest implements a least request load balancer. +package leastrequest + +import ( + "encoding/json" + "fmt" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/serviceconfig" +) + +// grpcranduint32 is a global to stub out in tests. +var grpcranduint32 = grpcrand.Uint32 + +// Name is the name of the least request balancer. +const Name = "least_request_experimental" + +var logger = grpclog.Component("least-request") + +func init() { + balancer.Register(bb{}) +} + +// LBConfig is the balancer config for least_request_experimental balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // ChoiceCount is the number of random SubConns to sample to find the one + // with the fewest outstanding requests. If unset, defaults to 2. If set to + // < 2, the config will be rejected, and if set to > 10, will become 10. + ChoiceCount uint32 `json:"choiceCount,omitempty"` +} + +type bb struct{} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbConfig := &LBConfig{ + ChoiceCount: 2, + } + if err := json.Unmarshal(s, lbConfig); err != nil { + return nil, fmt.Errorf("least-request: unable to unmarshal LBConfig: %v", err) + } + // "If `choice_count < 2`, the config will be rejected." 
- A48 + if lbConfig.ChoiceCount < 2 { // sweet + return nil, fmt.Errorf("least-request: lbConfig.choiceCount: %v, must be >= 2", lbConfig.ChoiceCount) + } + // "If a LeastRequestLoadBalancingConfig with a choice_count > 10 is + // received, the least_request_experimental policy will set choice_count = + // 10." - A48 + if lbConfig.ChoiceCount > 10 { + lbConfig.ChoiceCount = 10 + } + return lbConfig, nil +} + +func (bb) Name() string { + return Name +} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &leastRequestBalancer{scRPCCounts: make(map[balancer.SubConn]*atomic.Int32)} + baseBuilder := base.NewBalancerBuilder(Name, b, base.Config{HealthCheck: true}) + b.Balancer = baseBuilder.Build(cc, bOpts) + return b +} + +type leastRequestBalancer struct { + // Embeds balancer.Balancer because needs to intercept UpdateClientConnState + // to learn about choiceCount. + balancer.Balancer + + choiceCount uint32 + scRPCCounts map[balancer.SubConn]*atomic.Int32 // Hold onto RPC counts to keep track for subsequent picker updates. +} + +func (lrb *leastRequestBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lrCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + logger.Errorf("least-request: received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + lrb.choiceCount = lrCfg.ChoiceCount + return lrb.Balancer.UpdateClientConnState(s) +} + +type scWithRPCCount struct { + sc balancer.SubConn + numRPCs *atomic.Int32 +} + +func (lrb *leastRequestBalancer) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("least-request: Build called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + + for sc := range lrb.scRPCCounts { + if _, ok := info.ReadySCs[sc]; !ok { // If no longer ready, no more need for the ref to count active RPCs. + delete(lrb.scRPCCounts, sc) + } + } + + // Create new refs if needed. + for sc := range info.ReadySCs { + if _, ok := lrb.scRPCCounts[sc]; !ok { + lrb.scRPCCounts[sc] = new(atomic.Int32) + } + } + + // Copy refs to counters into picker. + scs := make([]scWithRPCCount, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, scWithRPCCount{ + sc: sc, + numRPCs: lrb.scRPCCounts[sc], // guaranteed to be present due to algorithm + }) + } + + return &picker{ + choiceCount: lrb.choiceCount, + subConns: scs, + } +} + +type picker struct { + // choiceCount is the number of random SubConns to find the one with + // the least request. + choiceCount uint32 + // Built out when receives list of ready RPCs. + subConns []scWithRPCCount +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + var pickedSC *scWithRPCCount + var pickedSCNumRPCs int32 + for i := 0; i < int(p.choiceCount); i++ { + index := grpcranduint32() % uint32(len(p.subConns)) + sc := p.subConns[index] + n := sc.numRPCs.Load() + if pickedSC == nil || n < pickedSCNumRPCs { + pickedSC = &sc + pickedSCNumRPCs = n + } + } + // "The counter for a subchannel should be atomically incremented by one + // after it has been successfully picked by the picker." - A48 + pickedSC.numRPCs.Add(1) + // "the picker should add a callback for atomically decrementing the + // subchannel counter once the RPC finishes (regardless of Status code)." - + // A48. 
+ done := func(balancer.DoneInfo) { + pickedSC.numRPCs.Add(-1) + } + return balancer.PickResult{ + SubConn: pickedSC.sc, + Done: done, + }, nil +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go index a164d1bedd7e2..7e751722b7c7a 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -43,7 +43,7 @@ import ( ) // Name is the name of the weighted round robin balancer. -const Name = "weighted_round_robin_experimental" +const Name = "weighted_round_robin" func init() { balancer.Register(bb{}) @@ -154,7 +154,12 @@ func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { wsc = wsci.(*weightedSubConn) } else { // addr is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + var sc balancer.SubConn + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sc, state) + }, + }) if err != nil { b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err) continue @@ -187,7 +192,7 @@ func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { // addr was removed by resolver. Remove. wsci, _ := b.subConns.Get(addr) wsc := wsci.(*weightedSubConn) - b.cc.RemoveSubConn(wsc.SubConn) + wsc.SubConn.Shutdown() b.subConns.Delete(addr) } } @@ -205,6 +210,10 @@ func (b *wrrBalancer) ResolverError(err error) { } func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *wrrBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { wsc := b.scMap[sc] if wsc == nil { b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state) @@ -360,6 +369,7 @@ func (p *picker) start(ctx context.Context) { } go func() { ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) + defer ticker.Stop() for { select { case <-ctx.Done(): diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go index 7567462e023d4..8741fdad19dcd 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go @@ -44,7 +44,7 @@ type AddrInfo struct { } // Equal allows the values to be compared by Attributes.Equal. 
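As a point of reference for the new policy above: it is selected through the service config, and the blank import is what triggers the init registration. A minimal client-side sketch, with a placeholder target and an insecure dial for brevity:

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/leastrequest" // blank import registers least_request_experimental
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// choiceCount below 2 is rejected and above 10 is clamped, per ParseConfig above.
	const cfg = `{"loadBalancingConfig": [{"least_request_experimental": {"choiceCount": 4}}]}`
	conn, err := grpc.Dial("dns:///example.invalid:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(cfg),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}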
-func (a AddrInfo) Equal(o interface{}) bool { +func (a AddrInfo) Equal(o any) bool { oa, ok := o.(AddrInfo) return ok && oa.Weight == a.Weight } diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go index 3d5acdab6afe5..220f4e555674a 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -24,6 +24,7 @@ package weightedtarget import ( "encoding/json" "fmt" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" @@ -54,7 +55,13 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba b.logger = prefixLogger(b) b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) b.stateAggregator.Start() - b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, b.logger) + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: bOpts, + StateAggregator: b.stateAggregator, + Logger: b.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) b.bg.Start() b.logger.Infof("Created") return b @@ -163,7 +170,7 @@ func (b *weightedTargetBalancer) ResolverError(err error) { } func (b *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - b.bg.UpdateSubConnState(sc, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } func (b *weightedTargetBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 04b9ad4116912..a4411c22bfc8c 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat // lock held. But the lock guards only the scheduling part. The actual // callback is called asynchronously without the lock being held. ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. - if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs - } errCh <- ccb.balancer.UpdateClientConnState(*ccs) }) if !ok { @@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. 
+		sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
 	})
 	ccb.mu.Unlock()
 }
@@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
 	}
 
 	ccb.mode = m
-	done := ccb.serializer.Done
+	done := ccb.serializer.Done()
 	b := ccb.balancer
 	ok := ccb.serializer.Schedule(func(_ context.Context) {
 		// Close the serializer to ensure that no more calls from gRPC are sent
@@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
 	}
 	ccb.mu.Unlock()
 
-	// Give enqueued callbacks a chance to finish.
+	// Give enqueued callbacks a chance to finish before closing the balancer.
 	<-done
 
-	// Spawn a goroutine to close the balancer (since it may block trying to
-	// cleanup all allocated resources) and return early.
-	go b.Close()
+	b.Close()
 }
 
 // exitIdleMode is invoked by grpc when the channel exits idle mode either
@@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
 		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
 		return nil, err
 	}
-	acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
+	acbw := &acBalancerWrapper{
+		ccb:           ccb,
+		ac:            ac,
+		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
+		stateListener: opts.StateListener,
+	}
 	ac.acbw = acbw
 	return acbw, nil
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	if ccb.isIdleOrClosed() {
-		// It it safe to ignore this call when the balancer is closed or in idle
-		// because the ClientConn takes care of closing the connections.
-		//
-		// Not returning early from here when the balancer is closed or in idle
-		// leads to a deadlock though, because of the following sequence of
-		// calls when holding cc.mu:
-		// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
-		// ccb.RemoveAddrConn --> cc.removeAddrConn
-		return
-	}
-
-	acbw, ok := sc.(*acBalancerWrapper)
-	if !ok {
-		return
-	}
-	ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+	// The graceful switch balancer will never call this.
+	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
 }
 
 func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
@@ -380,7 +356,9 @@ func (ccb *ccBalancerWrapper) Target() string {
 // acBalancerWrapper is a wrapper on top of ac for balancers.
 // It implements balancer.SubConn interface.
 type acBalancerWrapper struct {
-	ac *addrConn // read-only
+	ac            *addrConn          // read-only
+	ccb           *ccBalancerWrapper // read-only
+	stateListener func(balancer.SubConnState)
 
 	mu        sync.Mutex
 	producers map[balancer.ProducerBuilder]*refCountedProducer
@@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() {
 	go acbw.ac.connect()
 }
 
+func (acbw *acBalancerWrapper) Shutdown() {
+	ccb := acbw.ccb
+	if ccb.isIdleOrClosed() {
+		// It is safe to ignore this call when the balancer is closed or in idle
+		// because the ClientConn takes care of closing the connections.
+		//
+		// Not returning early from here when the balancer is closed or in idle
+		// leads to a deadlock though, because of the following sequence of
+		// calls when holding cc.mu:
+		// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
+		// ccb.RemoveAddrConn --> cc.removeAddrConn
+		return
+	}
+
+	ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
 // NewStream begins a streaming RPC on the addrConn. If the addrConn is not
 // ready, blocks until it is or ctx expires.
Returns an error when the context // expires or the addrConn is shut down. @@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, // Invoke performs a unary RPC. If the addrConn is not ready, returns // errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa14dd3a..5954801122ad9 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index e6a1dc5d75ed8..788c89c16f960 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,12 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return err - } - defer cc.idlenessMgr.onCallEnd() - +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 95a7459b02f65..ff7fea102288c 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -34,9 +34,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -53,8 +56,6 @@ import ( const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -137,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), czData: new(channelzData), @@ -190,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Register ClientConn with channelz. cc.channelzRegistration(target) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -265,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Configure idleness support with configured idle timeout or default idle // timeout duration. Idleness can be explicitly disabled by the user, by // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) // Return early for non-blocking dials. if !cc.dopts.block { @@ -316,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) { channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) } +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + // exitIdleMode moves the channel out of idle mode by recreating the name // resolver and load balancer. 
func (cc *ClientConn) exitIdleMode() error {
@@ -326,7 +338,7 @@ func (cc *ClientConn) exitIdleMode() error {
 	}
 	if cc.idlenessState != ccIdlenessStateIdle {
 		cc.mu.Unlock()
-		logger.Info("ClientConn asked to exit idle mode when not in idle mode")
+		channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
 		return nil
 	}
 
@@ -349,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error {
 	cc.idlenessState = ccIdlenessStateExitingIdle
 	exitedIdle := false
 	if cc.blockingpicker == nil {
-		cc.blockingpicker = newPickerWrapper()
+		cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers)
 	} else {
 		cc.blockingpicker.exitIdleMode()
 		exitedIdle = true
@@ -397,7 +409,8 @@ func (cc *ClientConn) enterIdleMode() error {
 		return ErrClientConnClosing
 	}
 	if cc.idlenessState != ccIdlenessStateActive {
-		logger.Error("ClientConn asked to enter idle mode when not active")
+		channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
+		cc.mu.Unlock()
 		return nil
 	}
 
@@ -474,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error {
 func (cc *ClientConn) channelzRegistration(target string) {
 	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
 	cc.addTraceEvent("created")
-	cc.csMgr.channelzID = cc.channelzID
 }
 
 // chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -491,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) {
 	} else if len(interceptors) == 1 {
 		chainedInt = interceptors[0]
 	} else {
-		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+		chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
 			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
 		}
 	}
@@ -503,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final
 	if curr == len(interceptors)-1 {
 		return finalInvoker
 	}
-	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
 		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
 	}
 }
@@ -539,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr
 	}
 }
 
+// newConnectivityStateManager creates a connectivityStateManager with
+// the specified id.
+func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager {
+	return &connectivityStateManager{
+		channelzID: id,
+		pubSub:     grpcsync.NewPubSub(ctx),
+	}
+}
+
 // connectivityStateManager keeps the connectivity.State of ClientConn.
 // This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state at two places.
 type connectivityStateManager struct {
 	mu         sync.Mutex
 	state      connectivity.State
 	notifyChan chan struct{}
 	channelzID *channelz.Identifier
+	pubSub     *grpcsync.PubSub
 }
 
 // updateState updates the connectivity.State of ClientConn.
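The PubSub wired in here feeds internal subscribers; applications observe the same transitions through the exported (experimental) ClientConn surface, whose WaitForStateChange contract is documented below. A short sketch using only the public API, with names of our choosing:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// watchConnectivity logs every connectivity transition until ctx is done.
func watchConnectivity(ctx context.Context, cc *grpc.ClientConn) {
	state := cc.GetState()
	for {
		log.Printf("connectivity: %v", state)
		if !cc.WaitForStateChange(ctx, state) {
			return // ctx expired or was canceled
		}
		state = cc.GetState()
	}
}

func main() {
	conn, err := grpc.Dial("dns:///example.invalid:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	watchConnectivity(ctx, conn)
}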
@@ -561,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -590,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -622,7 +650,7 @@ type ClientConn struct { channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager + idlenessMgr idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -668,6 +696,19 @@ const ( ccIdlenessStateExitingIdle ) +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -759,6 +800,10 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -867,6 +912,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -874,7 +933,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), @@ -995,8 +1054,9 @@ func equalAddresses(a, b []resolver.Address) bool { // connections or connection attempts. 
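As the comment above notes, clearing BalancerAttributes is what makes resolver.Address.Equal usable for comparisons, since Equal also compares that field. A small self-contained sketch (the attribute key and value are made up):

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	a := resolver.Address{Addr: "10.0.0.1:443", BalancerAttributes: attributes.New("hint", 1)}
	b := resolver.Address{Addr: "10.0.0.1:443"}
	fmt.Println(a.Equal(b)) // false: BalancerAttributes differ
	a.BalancerAttributes = nil
	fmt.Println(a.Equal(b)) // true once the field is cleared
}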
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + addrs = copyAddressesWithoutBalancerAttributes(addrs) if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1031,8 +1091,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.cancel() ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. if ac.transport != nil { defer ac.transport.GracefulClose() ac.transport = nil @@ -1137,23 +1197,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } @@ -1192,7 +1242,10 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() cc.mu.Lock() if cc.conns == nil { @@ -1226,7 +1279,7 @@ func (cc *ClientConn) Close() error { rWrapper.close() } if idlenessMgr != nil { - idlenessMgr.close() + idlenessMgr.Close() } for ac := range conns { @@ -1336,12 +1389,14 @@ func (ac *addrConn) resetTransport() { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + ac.mu.Lock() if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() return } - ac.mu.Lock() + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1537,7 +1592,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1625,16 +1680,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. 
-		ac.mu.Unlock()
-		curTr.GracefulClose()
-		ac.mu.Lock()
-	}
+
 	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
 		Desc:     "Subchannel deleted",
 		Severity: channelz.CtInfo,
@@ -1648,6 +1694,29 @@ func (ac *addrConn) tearDown(err error) {
 	// being deleted right away.
 	channelz.RemoveEntry(ac.channelzID)
 	ac.mu.Unlock()
+
+	// We have to release the lock before the call to GracefulClose/Close here
+	// because both of them call onClose(), which requires locking ac.mu.
+	if curTr != nil {
+		if err == errConnDrain {
+			// Close the transport gracefully when the subConn is being shutdown.
+			//
+			// GracefulClose() may be executed multiple times if:
+			// - multiple GoAway frames are received from the server
+			// - there are concurrent name resolver or balancer triggered
+			// address removal and GoAway
+			curTr.GracefulClose()
+		} else {
+			// Hard close the transport when the channel is entering idle or is
+			// being shutdown. In the case where the channel is being shutdown,
+			// closing of transports is also taken care of by cancelation of cc.ctx.
+			// But in the case where the channel is entering idle, we need to
+			// explicitly close the transports here. Instead of distinguishing
+			// between these two cases, it is simpler to close the transport
+			// unconditionally here.
+			curTr.Close(err)
+		}
+	}
 }
 
 func (ac *addrConn) getState() connectivity.State {
@@ -1807,19 +1876,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
 }
 
 // parseTarget uses RFC 3986 semantics to parse the given target into a
-// resolver.Target struct containing scheme, authority and url. Query
-// params are stripped from the endpoint.
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
 func parseTarget(target string) (resolver.Target, error) {
 	u, err := url.Parse(target)
 	if err != nil {
 		return resolver.Target{}, err
 	}
-	return resolver.Target{
-		Scheme:    u.Scheme,
-		Authority: u.Host,
-		URL:       *u,
-	}, nil
+	return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// Return true for characters that must be escaped; the valid (unescaped)
+	// characters are listed here:
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanum are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimiters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the Go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
 }
 
 // Determine channel authority. The order of precedence is as follows:
@@ -1872,7 +1992,11 @@ func (cc *ClientConn) determineAuthority() error {
 	// the channel authority given the user's dial target.
For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - cc.authority = endpoint + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 129776547811b..411e3dfd47ccd 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 150ae55767693..0854e7af65184 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -25,8 +25,8 @@ import ( "fmt" "io" "net" - "sync" + "golang.org/x/sync/semaphore" grpc "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -35,15 +35,13 @@ import ( "google.golang.org/grpc/credentials/alts/internal/conn" altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/internal/envconfig" ) const ( // The maximum byte size of receive frames. frameLimit = 64 * 1024 // 64 KB rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" - // maxPendingHandshakes represents the maximum number of concurrent - // handshakes. - maxPendingHandshakes = 100 ) var ( @@ -59,9 +57,9 @@ var ( return conn.NewAES128GCMRekey(s, keyData) }, } - // control number of concurrent created (but not closed) handshakers. - mu sync.Mutex - concurrentHandshakes = int64(0) + // control number of concurrent created (but not closed) handshakes. + clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) + serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) // errDropped occurs when maxPendingHandshakes is reached. errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") // errOutOfBound occurs when the handshake service returns a consumed @@ -77,30 +75,6 @@ func init() { } } -func acquire() bool { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. 
- n := int64(1) - success := maxPendingHandshakes-concurrentHandshakes >= n - if success { - concurrentHandshakes += n - } - mu.Unlock() - return success -} - -func release() { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - concurrentHandshakes -= n - if concurrentHandshakes < 0 { - mu.Unlock() - panic("bad release") - } - mu.Unlock() -} - // ClientHandshakerOptions contains the client handshaker options that can // provided by the caller. type ClientHandshakerOptions struct { @@ -134,10 +108,6 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { return &ServerHandshakerOptions{} } -// TODO: add support for future local and remote endpoint in both client options -// and server options (server options struct does not exist now. When -// caller can provide endpoints, it should be created. - // altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. @@ -185,10 +155,10 @@ func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !clientHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer clientHandshakes.Release(1) if h.side != core.ClientSide { return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") @@ -238,10 +208,10 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent // ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !serverHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer serverHandshakes.Release(1) if h.side != core.ServerSide { return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") @@ -264,8 +234,6 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent } // Prepare server parameters. - // TODO: currently only ALTS parameters are provided. Might need to use - // more options in the future. params := make(map[int32]*altspb.ServerHandshakeParameters) params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ RecordProtocols: recordProtocols, @@ -391,3 +359,10 @@ func (h *altsHandshaker) Close() { h.stream.CloseSend() } } + +// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores +// to allow numberOfAllowedHandshakes concurrent handshakes each. 
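The mutex-guarded counter above is replaced by weighted semaphores from golang.org/x/sync, sized from envconfig. A minimal sketch of the fail-fast gating pattern both handshake paths now use; the cap of 100 mirrors the old maxPendingHandshakes, and the function and variable names are ours:

package main

import (
	"errors"

	"golang.org/x/sync/semaphore"
)

var handshakes = semaphore.NewWeighted(100) // illustrative cap; gRPC reads it from envconfig.ALTSMaxConcurrentHandshakes

func doHandshake() error {
	if !handshakes.TryAcquire(1) {
		return errors.New("maximum number of concurrent handshakes reached") // analogous to errDropped
	}
	defer handshakes.Release(1)
	// ... perform the handshake ...
	return nil
}

func main() { _ = doHandshake() }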
+func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) { + clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) + serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 83e3bae37b172..c7cf1810a196a 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 0b0093328bffd..81d0f11408477 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index c2e564c7ded44..69f0947582dae 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go index 957523caad107..8d8e2d4a0f5a6 100644 --- a/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go @@ -39,7 +39,7 @@ func init() { type pluginBuilder struct{} -func (p *pluginBuilder) ParseConfig(c interface{}) (*certprovider.BuildableConfig, error) { +func (p *pluginBuilder) ParseConfig(c any) (*certprovider.BuildableConfig, error) { data, ok := c.(json.RawMessage) if !ok { return nil, fmt.Errorf("meshca: unsupported config type: %T", c) diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go index f24df7c5008b9..07ba05b10746d 100644 --- a/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go @@ -66,7 +66,7 @@ func getBuilder(name string) Builder { type Builder interface { // ParseConfig parses the given config, which is in a format specific to individual // implementations, and returns a BuildableConfig on success. 
- ParseConfig(interface{}) (*BuildableConfig, error) + ParseConfig(any) (*BuildableConfig, error) // Name returns the name of providers built by this builder. Name() string diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go index 90f98b3c9e713..5c72f192cc339 100644 --- a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go @@ -31,8 +31,8 @@ var provStore = &store{ // storeKey acts as the key to the map of providers maintained by the store. A // combination of provider name and configuration is used to uniquely identify // every provider instance in the store. Go maps need to be indexed by -// comparable types, so the provider configuration is converted from -// `interface{}` to string using the ParseConfig method while creating this key. +// comparable types, so the provider configuration is converted from `any` to +// `string` using the ParseConfig method while creating this key. type storeKey struct { // name of the certificate provider. name string @@ -137,7 +137,7 @@ func (bc *BuildableConfig) String() string { // ParseConfig is a convenience function to create a BuildableConfig given a // provider name and configuration. Returns an error if there is no registered // builder for the given name or if the config parsing fails. -func ParseConfig(name string, config interface{}) (*BuildableConfig, error) { +func ParseConfig(name string, config any) (*BuildableConfig, error) { parser := getBuilder(name) if parser == nil { return nil, fmt.Errorf("no certificate provider builder found for %q", name) @@ -147,7 +147,7 @@ func ParseConfig(name string, config interface{}) (*BuildableConfig, error) { // GetProvider is a convenience function to create a provider given the name, // config and build options. -func GetProvider(name string, config interface{}, opts BuildOptions) (Provider, error) { +func GetProvider(name string, config any, opts BuildOptions) (Provider, error) { bc, err := ParseConfig(name, config) if err != nil { return nil, err diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 15a3d5102a9a2..1fd0d5c127f4f 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -78,6 +78,7 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. 
The default value for this buffer is @@ -628,6 +643,7 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + recvBufferPool: nopBufferPool{}, } } @@ -676,3 +692,24 @@ func WithIdleTimeout(d time.Duration) DialOption { o.idleTimeout = d }) } + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 07a5861352a6f..69d5580b6adfd 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -90,9 +90,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go index a3bb173c24ac7..6306e8bb0f0ad 100644 --- a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -40,7 +40,7 @@ const Name = "gzip" func init() { c := &compressor{} - c.poolCompressor.New = func() interface{} { + c.poolCompressor.New = func() any { return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} } encoding.RegisterCompressor(c) @@ -61,7 +61,7 @@ func SetLevel(level int) error { return fmt.Errorf("grpc: invalid gzip compression level: %d", level) } c := encoding.GetCompressor(Name).(*compressor) - c.poolCompressor.New = func() interface{} { + c.poolCompressor.New = func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35afe7d3..0ee3d3bae9739 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
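Both buffer-related dial options introduced in this file compose like any other DialOption. A hedged usage sketch (placeholder target; both APIs are experimental and, per the note above, the receive pool is ignored when stats handlers, tracing, or binary logging are in use):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("dns:///example.invalid:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()), // reuse receive-side parse buffers
		grpc.WithSharedWriteBuffer(true),                    // release the transport write buffer after each flush
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}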
type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e2abb9..ac73c9ced2553 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) 
} -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be34bf5f..16928c9cb993c 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) 
} // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index ef06a4822b703..b1674d8267ca4 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40d365b..ecfd36d713032 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. 
- Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { 
g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 142d35f753e9f..24299efd63f7a 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57be89f..877d78fc3d003 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. 
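// The any-based handler and interceptor signatures in this file are what
// application code implements against. A minimal, hedged sketch of a unary
// server interceptor follows; the loggingInterceptor name and the logged
// fields are illustrative, not part of this patch.

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

func loggingInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	log.Printf("unary call: %s", info.FullMethod)
	resp, err := handler(ctx, req) // invoke the wrapped service method
	if err != nil {
		log.Printf("unary call failed: %s: %v", info.FullMethod, err)
	}
	return resp, err
}

// Installed at server construction time:
//	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))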
-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62a7cbf..3c594e6e4e55a 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. 
It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index c1f7e75c3ec87..8177fb58da9aa 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -115,18 +115,6 @@ func (sbc *subBalancerWrapper) exitIdle() (complete bool) { return true } -func (sbc *subBalancerWrapper) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - b := sbc.balancer - if b == nil { - // This sub-balancer was closed. This can happen when EDS removes a - // locality. 
The balancer for this locality was already closed, and the - // SubConns are being deleted. But SubConn state change can still - // happen. - return - } - b.UpdateSubConnState(sc, state) -} - func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) error { sbc.ccState = &s b := sbc.balancer @@ -225,8 +213,9 @@ type BalancerGroup struct { outgoingMu sync.Mutex outgoingStarted bool idToBalancerConfig map[string]*subBalancerWrapper - // Cache for sub-balancers when they are removed. - balancerCache *cache.TimeoutCache + // Cache for sub-balancers when they are removed. This is `nil` if caching + // is disabled by passing `0` for Options.SubBalancerCloseTimeout`. + deletedBalancerCache *cache.TimeoutCache // incomingMu is to make sure this balancer group doesn't send updates to cc // after it's closed. @@ -244,7 +233,7 @@ type BalancerGroup struct { // incomingMu guards all operations in the direction: // Sub-balancer-->ClientConn. Including NewSubConn, RemoveSubConn. It also // guards the map from SubConn to balancer ID, so updateSubConnState needs - // to hold it shortly to find the sub-balancer to forward the update. + // to hold it shortly to potentially delete from the map. // // UpdateState is called by the balancer state aggretator, and it will // decide when and whether to call. @@ -256,24 +245,40 @@ type BalancerGroup struct { scToSubBalancer map[balancer.SubConn]*subBalancerWrapper } -// DefaultSubBalancerCloseTimeout is defined as a variable instead of const for -// testing. -// -// TODO: make it a parameter for New(). -var DefaultSubBalancerCloseTimeout = 15 * time.Minute +// Options wraps the arguments to be passed to the BalancerGroup ctor. +type Options struct { + // CC is a reference to the parent balancer.ClientConn. + CC balancer.ClientConn + // BuildOpts contains build options to be used when creating sub-balancers. + BuildOpts balancer.BuildOptions + // StateAggregator is an implementation of the BalancerStateAggregator + // interface to aggregate picker and connectivity states from sub-balancers. + StateAggregator BalancerStateAggregator + // Logger is a group specific prefix logger. + Logger *grpclog.PrefixLogger + // SubBalancerCloseTimeout is the amount of time deleted sub-balancers spend + // in the idle cache. A value of zero here disables caching of deleted + // sub-balancers. + SubBalancerCloseTimeout time.Duration +} // New creates a new BalancerGroup. Note that the BalancerGroup // needs to be started to work. -func New(cc balancer.ClientConn, bOpts balancer.BuildOptions, stateAggregator BalancerStateAggregator, logger *grpclog.PrefixLogger) *BalancerGroup { - return &BalancerGroup{ - cc: cc, - buildOpts: bOpts, - logger: logger, - stateAggregator: stateAggregator, +func New(opts Options) *BalancerGroup { + var bc *cache.TimeoutCache + if opts.SubBalancerCloseTimeout != time.Duration(0) { + bc = cache.NewTimeoutCache(opts.SubBalancerCloseTimeout) + } - idToBalancerConfig: make(map[string]*subBalancerWrapper), - balancerCache: cache.NewTimeoutCache(DefaultSubBalancerCloseTimeout), - scToSubBalancer: make(map[balancer.SubConn]*subBalancerWrapper), + return &BalancerGroup{ + cc: opts.CC, + buildOpts: opts.BuildOpts, + stateAggregator: opts.StateAggregator, + logger: opts.Logger, + + deletedBalancerCache: bc, + idToBalancerConfig: make(map[string]*subBalancerWrapper), + scToSubBalancer: make(map[balancer.SubConn]*subBalancerWrapper), } } @@ -319,9 +324,10 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer. 
defer bg.outgoingMu.Unlock() var sbc *subBalancerWrapper // If outgoingStarted is true, search in the cache. Otherwise, cache is - // guaranteed to be empty, searching is unnecessary. - if bg.outgoingStarted { - if old, ok := bg.balancerCache.Remove(id); ok { + // guaranteed to be empty, searching is unnecessary. Also, skip the cache if + // caching is disabled. + if bg.outgoingStarted && bg.deletedBalancerCache != nil { + if old, ok := bg.deletedBalancerCache.Remove(id); ok { sbc, _ = old.(*subBalancerWrapper) if sbc != nil && sbc.builder != builder { // If the sub-balancer in cache was built with a different @@ -392,28 +398,47 @@ func (bg *BalancerGroup) UpdateBuilder(id string, builder balancer.Builder) { // subconns) will be done after timeout. func (bg *BalancerGroup) Remove(id string) { bg.logger.Infof("Removing child policy for locality %q", id) + bg.outgoingMu.Lock() - if sbToRemove, ok := bg.idToBalancerConfig[id]; ok { - if bg.outgoingStarted { - bg.balancerCache.Add(id, sbToRemove, func() { - // A sub-balancer evicted from the timeout cache needs to closed - // and its subConns need to removed, unconditionally. There is a - // possibility that a sub-balancer might be removed (thereby - // moving it to the cache) around the same time that the - // balancergroup is closed, and by the time we get here the - // balancergroup might be closed. Check for `outgoingStarted == - // true` at that point can lead to a leaked sub-balancer. - bg.outgoingMu.Lock() - sbToRemove.stopBalancer() - bg.outgoingMu.Unlock() - bg.cleanupSubConns(sbToRemove) - }) - } - delete(bg.idToBalancerConfig, id) - } else { + + sbToRemove, ok := bg.idToBalancerConfig[id] + if !ok { bg.logger.Infof("balancer group: trying to remove a non-existing locality from balancer group: %v", id) + bg.outgoingMu.Unlock() + return } + + // Unconditionally remove the sub-balancer config from the map. + delete(bg.idToBalancerConfig, id) + if !bg.outgoingStarted { + // Nothing needs to be done here, since we wouldn't have created the + // sub-balancer. + bg.outgoingMu.Unlock() + return + } + + if bg.deletedBalancerCache != nil { + bg.deletedBalancerCache.Add(id, sbToRemove, func() { + // A sub-balancer evicted from the timeout cache needs to closed + // and its subConns need to removed, unconditionally. There is a + // possibility that a sub-balancer might be removed (thereby + // moving it to the cache) around the same time that the + // balancergroup is closed, and by the time we get here the + // balancergroup might be closed. Check for `outgoingStarted == + // true` at that point can lead to a leaked sub-balancer. + bg.outgoingMu.Lock() + sbToRemove.stopBalancer() + bg.outgoingMu.Unlock() + bg.cleanupSubConns(sbToRemove) + }) + bg.outgoingMu.Unlock() + return + } + + // Remove the sub-balancer with immediate effect if we are not caching. + sbToRemove.stopBalancer() bg.outgoingMu.Unlock() + bg.cleanupSubConns(sbToRemove) } // bg.remove(id) doesn't do cleanup for the sub-balancer. This function does @@ -449,12 +474,11 @@ func (bg *BalancerGroup) connect(sb *subBalancerWrapper) { // Following are actions from the parent grpc.ClientConn, forward to sub-balancers. -// UpdateSubConnState handles the state for the subconn. It finds the -// corresponding balancer and forwards the update. -func (bg *BalancerGroup) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to cb and updates scToSubBalancer if +// needed. 
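// The StateListener wiring used throughout this patch relies on a closure
// capturing a SubConn variable that is only assigned by the subsequent
// NewSubConn call; it assumes no state update is delivered before NewSubConn
// returns, which the surrounding code also relies on. A hedged, simplified
// sketch of the pattern (the newWatchedSubConn name and the handle callback
// are illustrative, and the original code additionally chains any previously
// installed listener):

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

func newWatchedSubConn(cc balancer.ClientConn, addrs []resolver.Address, handle func(balancer.SubConn, balancer.SubConnState)) (balancer.SubConn, error) {
	var sc balancer.SubConn
	opts := balancer.NewSubConnOptions{}
	opts.StateListener = func(state balancer.SubConnState) {
		handle(sc, state) // sc is already assigned when updates arrive
	}
	var err error
	sc, err = cc.NewSubConn(addrs, opts)
	return sc, err
}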
+func (bg *BalancerGroup) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { bg.incomingMu.Lock() - config, ok := bg.scToSubBalancer[sc] - if !ok { + if _, ok := bg.scToSubBalancer[sc]; !ok { bg.incomingMu.Unlock() return } @@ -465,10 +489,18 @@ func (bg *BalancerGroup) UpdateSubConnState(sc balancer.SubConn, state balancer. bg.incomingMu.Unlock() bg.outgoingMu.Lock() - config.updateSubConnState(sc, state) + if cb != nil { + cb(state) + } bg.outgoingMu.Unlock() } +// UpdateSubConnState handles the state for the subconn. It finds the +// corresponding balancer and forwards the update. +func (bg *BalancerGroup) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + bg.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + // UpdateClientConnState handles ClientState (including balancer config and // addresses) from resolver. It finds the balancer and forwards the update. func (bg *BalancerGroup) UpdateClientConnState(id string, s balancer.ClientConnState) error { @@ -507,6 +539,9 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver bg.incomingMu.Unlock() return nil, fmt.Errorf("NewSubConn is called after balancer group is closed") } + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bg.updateSubConnState(sc, state, oldListener) } sc, err := bg.cc.NewSubConn(addrs, opts) if err != nil { bg.incomingMu.Unlock() @@ -540,7 +575,7 @@ func (bg *BalancerGroup) Close() { bg.incomingStarted = false // Also remove all SubConns. for sc := range bg.scToSubBalancer { - bg.cc.RemoveSubConn(sc) + sc.Shutdown() delete(bg.scToSubBalancer, sc) } } @@ -548,7 +583,9 @@ func (bg *BalancerGroup) Close() { // Clear(true) runs clear function to close sub-balancers in cache. It // must be called out of outgoing mutex. - bg.balancerCache.Clear(true) + if bg.deletedBalancerCache != nil { + bg.deletedBalancerCache.Clear(true) + } bg.outgoingMu.Lock() if bg.outgoingStarted { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d96657e1..94a08d6875a95 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f632215fd3..0f31274a3ccca 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -230,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -270,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
- Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd761b8..4399c3df4959d 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -28,25 +28,25 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any closed bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) { b.mu.Lock() defer b.mu.Unlock() if b.closed { @@ -89,7 +89,7 @@ func (b *Unbounded) Load() { // // If the unbounded buffer is closed, the read channel returned by this method // is closed. -func (b *Unbounded) Get() <-chan interface{} { +func (b *Unbounded) Get() <-chan any { return b.c } diff --git a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go index 200b499ec81ef..3f2d47302c4e1 100644 --- a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go +++ b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go @@ -23,7 +23,7 @@ import ( ) type cacheEntry struct { - item interface{} + item any // Note that to avoid deadlocks (potentially caused by lock ordering), // callback can only be called without holding cache's mutex. callback func() @@ -38,14 +38,14 @@ type cacheEntry struct { type TimeoutCache struct { mu sync.Mutex timeout time.Duration - cache map[interface{}]*cacheEntry + cache map[any]*cacheEntry } // NewTimeoutCache creates a TimeoutCache with the given timeout. func NewTimeoutCache(timeout time.Duration) *TimeoutCache { return &TimeoutCache{ timeout: timeout, - cache: make(map[interface{}]*cacheEntry), + cache: make(map[any]*cacheEntry), } } @@ -57,7 +57,7 @@ func NewTimeoutCache(timeout time.Duration) *TimeoutCache { // If the Add was successful, it returns (newly added item, true). If there is // an existing entry for the specified key, the cache entry is not be updated // with the specified item and it returns (existing item, false). 
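// A hedged usage sketch for the any-based TimeoutCache API in this file;
// cache is an internal gRPC package, so this is illustrative only, and the
// key and values are made up.

package example

import (
	"time"

	"google.golang.org/grpc/internal/cache"
)

func demoTimeoutCache() {
	c := cache.NewTimeoutCache(time.Minute)

	// Add stores the item and arms a timer; the callback runs only if the
	// entry expires before being removed.
	item, added := c.Add("locality-1", "sub-balancer", func() {
		// cleanup for an expired entry goes here
	})
	_, _ = item, added // (stored item, true) on first insertion

	// Removing before the timeout returns the item and guarantees the
	// expiry callback will not run.
	if v, ok := c.Remove("locality-1"); ok {
		_ = v
	}
}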
-func (c *TimeoutCache) Add(key, item interface{}, callback func()) (interface{}, bool) { +func (c *TimeoutCache) Add(key, item any, callback func()) (any, bool) { c.mu.Lock() defer c.mu.Unlock() if e, ok := c.cache[key]; ok { @@ -88,7 +88,7 @@ func (c *TimeoutCache) Add(key, item interface{}, callback func()) (interface{}, // If the specified key exists in the cache, it returns (item associated with // key, true) and the callback associated with the item is guaranteed to be not // called. If the given key is not found in the cache, it returns (nil, false) -func (c *TimeoutCache) Remove(key interface{}) (item interface{}, ok bool) { +func (c *TimeoutCache) Remove(key any) (item any, ok bool) { c.mu.Lock() defer c.mu.Unlock() entry, ok := c.removeInternal(key) @@ -101,7 +101,7 @@ func (c *TimeoutCache) Remove(key interface{}) (item interface{}, ok bool) { // removeInternal removes and returns the item with key. // // caller must hold c.mu. -func (c *TimeoutCache) removeInternal(key interface{}) (*cacheEntry, bool) { +func (c *TimeoutCache) removeInternal(key any) (*cacheEntry, bool) { entry, ok := c.cache[key] if !ok { return nil, false diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd7921d9..5395e77529cd6 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,9 +24,7 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -40,8 +38,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +53,14 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. 
-func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2ce7b6..f89e6f77bbd0a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. 
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e2e645..1d4020f537953 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44e1dcb..98288c3f866fd 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc4024000..b5568b22e208c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b59033cd1..9deee7f6513e1 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. 
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go index 9fa0c94f41e8d..b6f1fa520fc44 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go +++ b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go @@ -43,12 +43,26 @@ func init() { // the Attributes field of resolver.Address. type handshakeAttrKey struct{} -// Equal reports whether the handshake info structs are identical (have the -// same pointer). This is sufficient as all subconns from one CDS balancer use -// the same one. -func (hi *HandshakeInfo) Equal(o interface{}) bool { - oh, ok := o.(*HandshakeInfo) - return ok && oh == hi +// Equal reports whether the handshake info structs are identical. +func (hi *HandshakeInfo) Equal(other *HandshakeInfo) bool { + if hi == nil && other == nil { + return true + } + if hi == nil || other == nil { + return false + } + if hi.rootProvider != other.rootProvider || + hi.identityProvider != other.identityProvider || + hi.requireClientCert != other.requireClientCert || + len(hi.sanMatchers) != len(other.sanMatchers) { + return false + } + for i := range hi.sanMatchers { + if !hi.sanMatchers[i].Equal(other.sanMatchers[i]) { + return false + } + } + return true } // SetHandshakeInfo returns a copy of addr in which the Attributes field is diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 80fd5c7d2a4f4..3cf10ddfbd4c0 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -37,9 +37,15 @@ var ( // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a364935..bfc45102ab245 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. 
-func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. 
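// Most applications do not implement these any-based logger interfaces by
// hand; the public grpclog package can build a LoggerV2 from io.Writers and
// install it globally. A hedged sketch (the writer choices are illustrative),
// to be run before any other gRPC activity:

package example

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Route INFO to stdout and WARNING/ERROR to stderr.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr))
}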
type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42ca869..faa998de7632b 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index d08e3e907666b..aa97273e7d13e 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -80,6 +80,13 @@ func Uint32() uint32 { return r.Uint32() } +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + // Shuffle implements rand.Shuffle on the grpcrand global source. 
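// Hedged sketch of the pattern behind the new ExpFloat64 helper above: a
// math/rand source is not safe for concurrent use, so every call on the
// package's shared source is serialized with a mutex. Names below are
// illustrative.

package example

import (
	"math/rand"
	"sync"
	"time"
)

var (
	mu  sync.Mutex
	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
)

func expFloat64() float64 {
	mu.Lock()
	defer mu.Unlock()
	return rng.ExpFloat64()
}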
var Shuffle = func(n int, f func(int, int)) { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 37b8d4117e778..900917dbe6c1d 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -32,10 +32,10 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e all + // done is closed once the serializer is shut down completely, i.e all // scheduled callbacks are executed and the serializer has deallocated all // its resources. - Done chan struct{} + done chan struct{} callbacks *buffer.Unbounded closedMu sync.Mutex @@ -48,12 +48,12 @@ type CallbackSerializer struct { // callbacks will be added once this context is canceled, and any pending un-run // callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), + cs := &CallbackSerializer{ + done: make(chan struct{}), callbacks: buffer.NewUnbounded(), } - go t.run(ctx) - return t + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. @@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // Return value indicates if the callback was successfully added to the list of // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() - if t.closed { + if cs.closed { return false } - t.callbacks.Put(f) + cs.callbacks.Put(f) return true } -func (t *CallbackSerializer) run(ctx context.Context) { +func (cs *CallbackSerializer) run(ctx context.Context) { var backlog []func(context.Context) - defer close(t.Done) + defer close(cs.done) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. - case callback, ok := <-t.callbacks.Get(): + case callback, ok := <-cs.callbacks.Get(): if !ok { return } - t.callbacks.Load() + cs.callbacks.Load() callback.(func(ctx context.Context))(ctx) } } // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() + // this method and closing cs.done. 
+ cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() for _, b := range backlog { b(ctx) } } -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { var backlog []func(context.Context) for { select { - case b := <-t.callbacks.Get(): + case b := <-cs.callbacks.Get(): backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() + cs.callbacks.Load() default: return backlog } } } + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 0000000000000..aef8cec1ab0cd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. 
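// A hedged usage sketch for the PubSub type being added in this file (an
// internal package, so illustrative only). Deliveries are asynchronous,
// ordered, and include the latest message for late subscribers.

package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

type printSubscriber struct{}

func (printSubscriber) OnMessage(msg any) { fmt.Println("got:", msg) }

func demoPubSub() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	ps.Publish("state: READY")
	unsub := ps.Subscribe(printSubscriber{}) // asynchronously receives "state: READY" first
	ps.Publish("state: IDLE")                // then "state: IDLE", unless unsubscribed first

	// Stop the PubSub and wait for pending deliveries to drain.
	cancel()
	<-ps.Done()
	unsub()
}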
+// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go index 884ae22292dcb..c3baac3643ce4 100644 --- a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go +++ b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go @@ -32,7 +32,7 @@ const pathKey = pathKeyType("grpc.internal.address.hierarchical_path") type pathValue []string -func (p pathValue) Equal(o interface{}) bool { +func (p pathValue) Equal(o any) bool { op, ok := o.(pathValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go similarity index 61% rename from vendor/google.golang.org/grpc/idle.go rename to vendor/google.golang.org/grpc/internal/idle/idle.go index dc3dc72f6b09d..6c272476e5ef6 100644 --- a/vendor/google.golang.org/grpc/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -16,7 +16,9 @@ * */ -package grpc +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle import ( "fmt" @@ -24,6 +26,8 @@ import ( "sync" "sync/atomic" "time" + + "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { return time.AfterFunc(d, f) } -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// Enforcer is the functionality provided by grpc.ClientConn to enter // and exit from idle mode. -type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error } -// idlenessManager defines the functionality required to track RPC activity on a +// Manager defines the functionality required to track RPC activity on a // channel. 
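// A hedged sketch of how the relocated idle Manager is driven (internal API,
// shown for illustration). stubEnforcer stands in for the real
// grpc.ClientConn-backed enforcer; all names here are illustrative.

package example

import (
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/idle"
)

type stubEnforcer struct{}

func (stubEnforcer) ExitIdleMode() error  { return nil }
func (stubEnforcer) EnterIdleMode() error { return nil }

func demoIdleManager() error {
	m := idle.NewManager(idle.ManagerOptions{
		Enforcer: stubEnforcer{},
		Timeout:  30 * time.Minute, // a zero Timeout disables idleness tracking
		Logger:   grpclog.Component("example"),
	})
	defer m.Close()

	// Each RPC brackets its lifetime with these calls; OnCallBegin exits
	// idle mode if needed and keeps the idle timer from firing mid-call.
	if err := m.OnCallBegin(); err != nil {
		return err
	}
	defer m.OnCallEnd()
	// ... issue the RPC ...
	return nil
}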
-type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() } -type noopIdlenessManager struct{} +type noopManager struct{} -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. -type idlenessManagerImpl struct { +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -64,14 +68,15 @@ type idlenessManagerImpl struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: // - a: Idle timeout has fired and handleIdleTimeout() is trying to put // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() + // - b: At the same time an RPC is made on the channel, and OnCallBegin() // is trying to prevent the channel from going idle. // - Competing intentions: // - The channel is in idle mode and there are multiple RPCs starting at @@ -83,28 +88,37 @@ type idlenessManagerImpl struct { timer *time.Timer } -// newIdlenessManager creates a new idleness manager implementation for the +// ManagerOptions is a collection of options used by +// NewManager. +type ManagerOptions struct { + Enforcer Enforcer + Timeout time.Duration + Logger grpclog.LoggerV2 +} + +// NewManager creates a new idleness manager implementation for the // given idle timeout. -func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} +func NewManager(opts ManagerOptions) Manager { + if opts.Timeout == 0 { + return noopManager{} } - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), + m := &manager{ + enforcer: opts.Enforcer, + timeout: int64(opts.Timeout), + logger: opts.Logger, } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i + m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) + return m } // resetIdleTimer resets the idle timer to the given duration. This method // should only be called from the timer callback. 
-func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if i.timer == nil { + if m.timer == nil { // Only close sets timer to nil. We are done. return } @@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { // It is safe to ignore the return value from Reset() because this method is // only ever called from the timer callback, which means the timer has // already fired. - i.timer.Reset(d) + m.timer.Reset(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { +func (m *manager) handleIdleTimeout() { + if m.isClosed() { return } - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(time.Duration(m.timeout)) return } // There has been activity on the channel since we last got here. Reset the // timer and return. - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) return } // This CAS operation is extremely likely to succeed given that there has // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { // This CAS operation can fail if an RPC started after we checked for // activity at the top of this method, or one was ongoing from before // the last time we were here. In both case, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) + m.resetIdleTimer(time.Duration(m.timeout)) return } // Now that we've set the active calls count to -math.MaxInt32, it's time to // actually move to idle mode. - if i.tryEnterIdleMode() { + if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return } @@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.resetIdleTimer(time.Duration(m.timeout)) } // tryEnterIdleMode instructs the channel to enter idle mode. 
But before @@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) tryEnterIdleMode() bool { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. return false } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // An very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. @@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool { // No new RPCs have come in since we last set the active calls count value // -math.MaxInt32 in the timer callback. And since we have the lock, it is // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) + if err := m.enforcer.EnterIdleMode(); err != nil { + m.logger.Errorf("Failed to enter idle mode: %v", err) return false } // Successfully entered idle mode. - i.actuallyIdle = true + m.actuallyIdle = true return true } -// onCallBegin is invoked at the start of every RPC. -func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { +// OnCallBegin is invoked at the start of every RPC. +func (m *manager) OnCallBegin() error { + if m.isClosed() { return nil } - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { + if err := m.exitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) return err } - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // exitIdleMode instructs the channel to exit idle mode. // // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) exitIdleMode() error { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if !i.actuallyIdle { + if !m.actuallyIdle { // This can happen in two scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative. + // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get + // time, all of them notice a negative calls count in OnCallBegin and get // here. 
The first one to get the lock would got the channel to exit idle. // // Either way, nothing to do here. return nil } - if err := i.enforcer.exitIdleMode(); err != nil { + if err := m.enforcer.ExitIdleMode(); err != nil { return fmt.Errorf("channel failed to exit idle mode: %v", err) } // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) return nil } -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { +// OnCallEnd is invoked at the end of every RPC. +func (m *manager) OnCallEnd() { + if m.isClosed() { return } // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) // Decrement the active calls count. This count can temporarily go negative // when the timer callback is in the process of moving the channel to idle // mode, but one or more RPCs come in and complete before the timer callback // can get done with the process of moving to idle mode. - atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) } -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 +func (m *manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 } -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) +func (m *manager) Close() { + atomic.StoreInt32(&m.closed, 1) - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() + m.idleMu.Lock() + m.timer.Stop() + m.timer = nil + m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c84446b..c8a8c76d628ca 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,33 +53,33 @@ var ( // given name. 
This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - CanonicalString interface{} // func (codes.Code) string + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. // @@ -88,14 +92,14 @@ var ( // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. // @@ -104,23 +108,26 @@ var ( ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. 
- JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -131,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. - NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -163,7 +170,11 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -174,7 +185,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. 
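The new `internal/idle` API above is intentionally small: an `Enforcer` implemented by the channel, and a `Manager` that brackets every RPC. A minimal sketch of how the two fit together is below; `fakeChannel` and the timeout value are hypothetical, and because `idle` is an internal package this would only compile from inside the grpc module itself.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/idle"
)

// fakeChannel is a hypothetical stand-in for grpc.ClientConn; it satisfies
// idle.Enforcer by reporting when the channel enters and exits idle mode.
type fakeChannel struct{}

func (fakeChannel) EnterIdleMode() error { fmt.Println("entered idle"); return nil }
func (fakeChannel) ExitIdleMode() error  { fmt.Println("exited idle"); return nil }

func main() {
	m := idle.NewManager(idle.ManagerOptions{
		Enforcer: fakeChannel{},
		Timeout:  30 * time.Minute, // a Timeout of 0 yields the no-op manager
		Logger:   grpclog.Component("idle-example"),
	})
	defer m.Close()

	// Each RPC is bracketed by OnCallBegin/OnCallEnd. OnCallBegin fails only
	// if the channel could not be brought out of idle mode.
	if err := m.OnCallBegin(); err != nil {
		fmt.Println("RPC blocked:", err)
		return
	}
	// ... the RPC runs here ...
	m.OnCallEnd()
}
```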
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e07734..900bfb7160803 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b51140..7033191375deb 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index df4cd5484e4ee..2f0417bd8db66 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/lookup/v1/rls.proto diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index 317a35a390c4f..0c687504b30b8 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/lookup/v1/rls_config.proto diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a948adbe..f0603871c93ac 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. 
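Most of the churn in these vendored files is the mechanical `interface{}` → `any` rename. Since Go 1.18, `any` is a predeclared alias for `interface{}`, so the two spellings denote the identical type and the rename cannot change behavior. A self-contained illustration:

```go
package main

import "fmt"

// oldStyle and newStyle have the identical type func(interface{}):
// any is an alias for interface{}, not a distinct type.
func oldStyle(v interface{}) { fmt.Printf("old: %v\n", v) }
func newStyle(v any)         { fmt.Printf("new: %v\n", v) }

func main() {
	// Both functions are assignable to func(any) for the same reason.
	for _, f := range []func(any){oldStyle, newStyle} {
		f("hello")
	}
}
```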
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 09a667f33cb09..99e1e5b36c891 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -62,7 +62,8 @@ const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. @@ -86,14 +87,14 @@ var ( minDNSResRate = 30 * time.Second ) -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: addressDialer(authorityWithPort), }, nil } @@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) + d.resolver, err = newNetResolver(target.URL.Host) if err != nil { return nil, err } @@ -180,19 +182,22 @@ type dnsResolver struct { ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. 
Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 timer = newTimerDNSResRate(minDNSResRate) select { @@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } @@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. + // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. 
func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f54f82f..4cf85cad9f810 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -49,7 +49,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +64,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +120,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. 
-func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81eb976..b330ccedc8abd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. 
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 326bf08480002..badab8acf3b11 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1505,14 +1505,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1521,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1554,9 +1552,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { statusGen = status.New(rawStatusCode, grpcMessage) } - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index ec4eef21342a4..8d3a353c1d581 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -165,16 +165,21 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - if config.MaxStreams != math.MaxUint32 { + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. 
+ maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: config.MaxStreams, + Val: maxStreams, }) } dynamicWindow := true @@ -233,7 +238,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -253,7 +258,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: config.MaxStreams, + maxStreams: maxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, @@ -850,7 +855,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -934,7 +939,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f5ab44..1958140082b35 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,6 +30,7 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" @@ -309,6 +310,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +318,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +339,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +403,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func 
newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +414,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +432,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c896595d9a..74a811fc0590b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,10 +43,6 @@ import ( "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +52,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -559,6 +551,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +585,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -736,7 +731,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. 
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/internal/wrr/edf.go b/vendor/google.golang.org/grpc/internal/wrr/edf.go index b4fb3f9d3bea7..a06656f464009 100644 --- a/vendor/google.golang.org/grpc/internal/wrr/edf.go +++ b/vendor/google.golang.org/grpc/internal/wrr/edf.go @@ -43,7 +43,7 @@ type edfEntry struct { deadline float64 weight int64 orderOffset uint64 - item interface{} + item any } // edfPriorityQueue is a heap.Interface implementation for edfEntry elements. @@ -55,17 +55,17 @@ func (pq edfPriorityQueue) Less(i, j int) bool { } func (pq edfPriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } -func (pq *edfPriorityQueue) Push(x interface{}) { +func (pq *edfPriorityQueue) Push(x any) { *pq = append(*pq, x.(*edfEntry)) } -func (pq *edfPriorityQueue) Pop() interface{} { +func (pq *edfPriorityQueue) Pop() any { old := *pq *pq = old[0 : len(old)-1] return old[len(old)-1] } -func (edf *edfWrr) Add(item interface{}, weight int64) { +func (edf *edfWrr) Add(item any, weight int64) { edf.lock.Lock() defer edf.lock.Unlock() entry := edfEntry{ @@ -78,7 +78,7 @@ func (edf *edfWrr) Add(item interface{}, weight int64) { heap.Push(&edf.items, &entry) } -func (edf *edfWrr) Next() interface{} { +func (edf *edfWrr) Next() any { edf.lock.Lock() defer edf.lock.Unlock() if len(edf.items) == 0 { diff --git a/vendor/google.golang.org/grpc/internal/wrr/random.go b/vendor/google.golang.org/grpc/internal/wrr/random.go index 6d5eb7d462099..25bbd82594d6d 100644 --- a/vendor/google.golang.org/grpc/internal/wrr/random.go +++ b/vendor/google.golang.org/grpc/internal/wrr/random.go @@ -27,7 +27,7 @@ import ( // weightedItem is a wrapped weighted item that is used to implement weighted random algorithm. type weightedItem struct { - item interface{} + item any weight int64 accumulatedWeight int64 } @@ -51,7 +51,7 @@ func NewRandom() WRR { var grpcrandInt63n = grpcrand.Int63n -func (rw *randomWRR) Next() (item interface{}) { +func (rw *randomWRR) Next() (item any) { rw.mu.RLock() defer rw.mu.RUnlock() if len(rw.items) == 0 { @@ -71,7 +71,7 @@ func (rw *randomWRR) Next() (item interface{}) { return rw.items[i].item } -func (rw *randomWRR) Add(item interface{}, weight int64) { +func (rw *randomWRR) Add(item any, weight int64) { rw.mu.Lock() defer rw.mu.Unlock() accumulatedWeight := weight diff --git a/vendor/google.golang.org/grpc/internal/wrr/wrr.go b/vendor/google.golang.org/grpc/internal/wrr/wrr.go index d46bfad86ef47..d0d82cf4f0151 100644 --- a/vendor/google.golang.org/grpc/internal/wrr/wrr.go +++ b/vendor/google.golang.org/grpc/internal/wrr/wrr.go @@ -24,9 +24,9 @@ type WRR interface { // Add adds an item with weight to the WRR set. // // Add and Next need to be thread safe. - Add(item interface{}, weight int64) + Add(item any, weight int64) // Next returns the next picked item. // // Add and Next need to be thread safe. 
- Next() interface{} + Next() any } diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index 9873da268db6e..c9f71d32cbb28 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -285,6 +285,12 @@ func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) ( m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch(), headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_ContainsMatch: m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_StringMatch: + sm, err := internalmatcher.StringMatcherFromProto(headerMatcherConfig.GetStringMatch()) + if err != nil { + return nil, fmt.Errorf("invalid string matcher %+v: %v", headerMatcherConfig.GetStringMatch(), err) + } + m = internalmatcher.NewHeaderStringMatcher(headerMatcherConfig.Name, sm, headerMatcherConfig.InvertMatch) default: return nil, errors.New("unknown header matcher type") } diff --git a/vendor/google.golang.org/grpc/orca/call_metrics.go b/vendor/google.golang.org/grpc/orca/call_metrics.go index 558c7bce6a8ee..157dad49c6571 100644 --- a/vendor/google.golang.org/grpc/orca/call_metrics.go +++ b/vendor/google.golang.org/grpc/orca/call_metrics.go @@ -135,8 +135,8 @@ func CallMetricsServerOption(smp ServerMetricsProvider) grpc.ServerOption { return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt(smp)), grpc.ChainStreamInterceptor(streamInt(smp))) } -func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { // We don't allocate the metric recorder here. It will be allocated the // first time the user calls CallMetricsRecorderFromContext(). rw := &recorderWrapper{smp: smp} @@ -155,8 +155,8 @@ func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req interface } } -func streamInt(smp ServerMetricsProvider) func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func streamInt(smp ServerMetricsProvider) func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { // We don't allocate the metric recorder here. It will be allocated the // first time the user calls CallMetricsRecorderFromContext(). 
rw := &recorderWrapper{smp: smp} diff --git a/vendor/google.golang.org/grpc/orca/internal/internal.go b/vendor/google.golang.org/grpc/orca/internal/internal.go index 35b899d9e8773..d1425c3e71644 100644 --- a/vendor/google.golang.org/grpc/orca/internal/internal.go +++ b/vendor/google.golang.org/grpc/orca/internal/internal.go @@ -35,7 +35,7 @@ import ( // configured via ServiceOptions, to a minimum of 30s. // // For testing purposes only. -var AllowAnyMinReportingInterval interface{} // func(*ServiceOptions) +var AllowAnyMinReportingInterval any // func(*ServiceOptions) // DefaultBackoffFunc is used by the producer to control its backoff behavior. // diff --git a/vendor/google.golang.org/grpc/orca/orca.go b/vendor/google.golang.org/grpc/orca/orca.go index 771db36af1c93..d0cb3720c8ac9 100644 --- a/vendor/google.golang.org/grpc/orca/orca.go +++ b/vendor/google.golang.org/grpc/orca/orca.go @@ -44,14 +44,11 @@ var logger = grpclog.Component("orca-backend-metrics") // import cycle. Hence this roundabout method is used. type loadParser struct{} -func (loadParser) Parse(md metadata.MD) interface{} { +func (loadParser) Parse(md metadata.MD) any { lr, err := internal.ToLoadReport(md) if err != nil { logger.Infof("Parse failed: %v", err) } - if lr == nil && logger.V(2) { - logger.Infof("Missing ORCA load report data") - } return lr } diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go index ce108aad65caa..2d58725547fc0 100644 --- a/vendor/google.golang.org/grpc/orca/producer.go +++ b/vendor/google.golang.org/grpc/orca/producer.go @@ -37,7 +37,7 @@ import ( type producerBuilder struct{} // Build constructs and returns a producer and its cleanup function -func (*producerBuilder) Build(cci interface{}) (balancer.Producer, func()) { +func (*producerBuilder) Build(cci any) (balancer.Producer, func()) { p := &producer{ client: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)), intervals: make(map[time.Duration]int), diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 02f975951242d..236837f4157cf 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,21 +28,26 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + idle bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. @@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index abe266b021d2e..2e9cf66b4afc4 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -26,12 +26,18 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { @@ -57,23 +65,36 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. The code below ensures the + // same behavior is retained if the env var is not set. 
+ if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) } return cfg, nil } type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd45547854f07..73bd63364335e 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f27978e1281fe..e6b0f14cd941f 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -29,10 +29,11 @@ import ( // NewBuilderWithScheme creates a new test resolver builder with the given scheme. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ - BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, - ResolveNowCallback: func(resolver.ResolveNowOptions) {}, - CloseCallback: func() {}, - scheme: scheme, + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, + UpdateStateCallback: func(error) {}, + ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, + scheme: scheme, } } @@ -42,6 +43,11 @@ type Resolver struct { // BuildCallback is called when the Build method is called. Must not be // nil. Must not be changed after the resolver may be built. BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) + // UpdateStateCallback is called when the UpdateState method is called on + // the resolver. The value passed as argument to this callback is the value + // returned by the resolver.ClientConn. Must not be nil. Must not be + // changed after the resolver may be built. + UpdateStateCallback func(err error) // ResolveNowCallback is called when the ResolveNow method is called on the // resolver. Must not be nil. Must not be changed after the resolver may // be built. @@ -93,8 +99,9 @@ func (r *Resolver) Close() { // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { r.mu.Lock() - r.CC.UpdateState(s) + err := r.CC.UpdateState(s) r.mu.Unlock() + r.UpdateStateCallback(err) } // ReportError calls CC.ReportError. 
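The manual resolver change above propagates the error returned by `ClientConn.UpdateState` to a new `UpdateStateCallback` hook, letting test code observe whether a pushed state was accepted or rejected. A minimal sketch of wiring it up (the scheme, address, and log messages below are illustrative assumptions, not part of this patch):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example") // hypothetical scheme

	// Observe the error the ClientConn returned for the pushed state.
	// Must be set before the resolver is built (i.e. before Dial).
	r.UpdateStateCallback = func(err error) {
		if err != nil {
			log.Printf("resolver state rejected: %v", err)
		}
	}
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "localhost:50051"}},
	})

	conn, err := grpc.Dial(r.Scheme()+":///ignored",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```
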
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3efd82d..804be887de0ac 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 353c10b69a5b8..11384e228e54d 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -126,27 +104,29 @@ type Address struct { // BalancerAttributes contains arbitrary data about this address intended // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. 
- Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. @@ -190,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -254,20 +260,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. 
- Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -321,10 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index b408b3688f2ea..d68330560848f 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() { ccr.mu.Unlock() // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done + <-ccr.serializer.Done() // Spawn a goroutine to close the resolver (since it may block trying to // cleanup all allocated resources) and return early. @@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) // which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } ok := ccr.serializer.Schedule(func(context.Context) { ccr.addChannelzTraceEvent(s) ccr.curState = s diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a306ba..b7723aa09cbb6 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. 
-func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -692,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -726,12 +727,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -791,16 +792,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -860,19 +863,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. 
-func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8869cc906f25e..244123c6c5a89 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -86,7 +86,7 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,20 +99,26 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} + mdata any +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream } // Server is a gRPC server to serve RPC requests. @@ -139,7 +145,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() + serverWorkerChannel chan *serverWorkerData } type serverOptions struct { @@ -164,19 +170,21 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ - maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -228,6 +236,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -268,9 +290,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -382,9 +404,6 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { - if n == 0 { - n = math.MaxUint32 - } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -550,6 +569,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -565,19 +605,24 @@ const serverWorkerResetThreshold = 1 << 16 // [1] https://github.com/golang/go/issues/18138 func (s *Server) serverWorker() { for completed := 0; completed < serverWorkerResetThreshold; completed++ { - f, ok := <-s.serverWorkerChannel + data, ok := <-s.serverWorkerChannel if !ok { return } - f() + s.handleSingleStream(data) } go s.serverWorker() } +func (s *Server) handleSingleStream(data *serverWorkerData) { + defer data.wg.Done() + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) +} + // initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. 
func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannel = make(chan *serverWorkerData) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } @@ -625,7 +670,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -633,7 +678,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -648,14 +693,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -666,7 +711,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -707,7 +752,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. @@ -908,6 +953,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -936,26 +982,21 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) - - streamQuota.acquire() - f := func() { - defer streamQuota.release() - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - } - if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannel <- f: + case s.serverWorkerChannel <- data: return default: // If all stream workers are busy, fallback to the default code path. 
} } - go f() + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -1094,7 +1135,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1141,7 +1182,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1150,7 +1191,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1187,7 +1228,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1294,7 +1335,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1304,7 +1345,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1468,7 +1509,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1477,7 +1518,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1504,7 +1545,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1518,7 +1559,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1621,7 +1662,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1687,13 +1728,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str pos := strings.LastIndex(sm, "/") if pos == -1 { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -1734,7 +1775,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -2050,32 +2091,3 @@ func validateSendCompressor(name, clientCompressors string) error { } return fmt.Errorf("client does not support compressor %q", name) } - -// atomicSemaphore implements a blocking, counting semaphore. 
acquire should be -// called synchronously; release may be called asynchronously. -type atomicSemaphore struct { - n int64 - wait chan struct{} -} - -func (q *atomicSemaphore) acquire() { - if atomic.AddInt64(&q.n, -1) < 0 { - // We ran out of quota. Block until a release happens. - <-q.wait - } -} - -func (q *atomicSemaphore) release() { - // N.B. the "<= 0" check below should allow for this to work with multiple - // concurrent calls to acquire, but also note that with synchronous calls to - // acquire, as our system does, n will never be less than -1. There are - // fairness issues (queuing) to consider if this was to be generalized. - if atomic.AddInt64(&q.n, 1) <= 0 { - // An acquire was waiting on us. Unblock it. - q.wait <- struct{}{} - } -} - -func newHandlerQuota(n uint32) *atomicSemaphore { - return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} -} diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 0000000000000..48a64cfe8e256 --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. 
+type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b78711..4ab70e2d462ab 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data. 
Does not include any diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index bcf2e4d81beb3..a93360efb8475 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error has status nil, which maps to codes.OK. There // is no sensible behavior for this, so we turn it into // an error with codes.Unknown and discard the existing // status. return New(codes.Unknown, err.Error()), false } - return gs.GRPCStatus(), true + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error wraps an error that has status nil, which maps // to codes.OK. There is no sensible behavior for this, // so we turn it into an error with codes.Unknown and // discard the existing status. return New(codes.Unknown, err.Error()), false } - p := gs.GRPCStatus().Proto() + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 10092685b2283..b14b2fbea2ebd 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. 
It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -126,7 +129,7 @@ type ClientStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -135,7 +138,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -155,11 +158,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { @@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -507,7 +515,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. logEntry := &binarylog.ServerHeader{ @@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. 
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1270,7 +1274,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1512,7 +1516,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1521,7 +1525,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. 
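The reworked `ClientStream.Header` above no longer surfaces transport errors directly: a nil `metadata.MD` together with a nil error now means the stream terminated without headers, and the RPC status is discovered through `RecvMsg`. A minimal sketch of the resulting calling pattern (the helper name and message parameter are illustrative assumptions, not part of this patch):

```go
package sketch

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// headerOrStatus is a hypothetical helper showing the adjusted contract:
// Header returning (nil, nil) means the stream ended without headers, so
// the caller must invoke RecvMsg to learn the final RPC status.
func headerOrStatus(stream grpc.ClientStream, m any) (metadata.MD, error) {
	md, err := stream.Header()
	if err != nil {
		return nil, err
	}
	if md == nil {
		// Stream terminated without headers; RecvMsg reports the status.
		if err := stream.RecvMsg(m); err != nil {
			return nil, fmt.Errorf("stream terminated without headers: %w", err)
		}
	}
	return md, nil
}
```
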
@@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b3e77d..9ded79321ba7a 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 3cc754062185a..d3f5bcbfcef8b 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.56.3" +const Version = "1.58.2" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b3d204..bbc9e2e3c8e36 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -84,6 +84,9 @@ not git grep -l 'x/net/context' -- "*.go" # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + # - Do not call grpclog directly. Use grpclog.Component instead. 
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -106,7 +109,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -168,8 +171,6 @@ proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. xxx_messageInfo_ ' "${SC_OUT}" diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index 20891c7a4cb86..f8f749835c24f 100644 --- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -97,7 +97,6 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts if !runDirectPath() { // If not xDS, fallback to DNS. - t.Scheme = dnsName t.URL.Scheme = dnsName return resolver.Get(dnsName).Build(t, cc, opts) } @@ -144,7 +143,6 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts } // Create and return an xDS resolver. - t.Scheme = xdsName t.URL.Scheme = xdsName if envconfig.XDSFederation { t = resolver.Target{ diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go index 68ed789f2a4de..ff27af026db59 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go @@ -20,6 +20,7 @@ package balancer import ( + _ "google.golang.org/grpc/balancer/leastrequest" // Register the least_request_experimental balancer _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index bcdeaf681ab5e..85a081d09df55 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -149,13 +149,6 @@ type ccUpdate struct { err error } -// scUpdate wraps a subConn update received from gRPC. This is directly passed -// on to the cluster_resolver balancer. -type scUpdate struct { - subConn balancer.SubConn - state balancer.SubConnState -} - type exitIdle struct{} // cdsBalancer implements a CDS based LB policy. 
It instantiates a @@ -388,7 +381,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { var sc serviceconfig.LoadBalancingConfig if sc, err = b.crParser.ParseConfig(crLBCfgJSON); err != nil { - b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", crLBCfgJSON, err) + b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(crLBCfgJSON), err) return } @@ -415,14 +408,6 @@ func (b *cdsBalancer) run() { switch update := u.(type) { case *ccUpdate: b.handleClientConnUpdate(update) - case *scUpdate: - // SubConn updates are passthrough and are simply handed over to - // the underlying cluster_resolver balancer. - if b.childLB == nil { - b.logger.Errorf("Received SubConn update with no child policy: %+v", update) - break - } - b.childLB.UpdateSubConnState(update.subConn, update.state) case exitIdle: if b.childLB == nil { b.logger.Errorf("Received ExitIdle with no child policy") @@ -540,11 +525,7 @@ func (b *cdsBalancer) ResolverError(err error) { // UpdateSubConnState handles subConn updates from gRPC. func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if b.closed.HasFired() { - b.logger.Warningf("Received subConn update after close: {%v, %v}", sc, state) - return - } - b.updateCh.Put(&scUpdate{subConn: sc, state: state}) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } // Close cancels the CDS watch, closes the child policy and closes the @@ -580,6 +561,8 @@ func (ccw *ccWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubC for i, addr := range addrs { newAddrs[i] = xdsinternal.SetHandshakeInfo(addr, ccw.xdsHI) } + // No need to override opts.StateListener; just forward all calls to the + // child that created the SubConn. return ccw.ClientConn.NewSubConn(newAddrs, opts) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go index e1a18ae338d39..407d2deff7d6d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -64,11 +65,11 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), loadWrapper: loadstore.NewWrapper(), - scWrappers: make(map[balancer.SubConn]*scWrapper), pickerUpdateCh: buffer.NewUnbounded(), requestCountMax: defaultRequestCountMax, } b.logger = prefixLogger(b) + b.child = gracefulswitch.NewBalancer(b, bOpts) go b.run() b.logger.Infof("Created") return b @@ -102,7 +103,7 @@ type clusterImplBalancer struct { xdsClient xdsclient.XDSClient config *LBConfig - childLB balancer.Balancer + child *gracefulswitch.Balancer cancelLoadReport func() edsServiceName string lrsServer *bootstrap.ServerConfig @@ -111,18 +112,6 @@ type clusterImplBalancer struct { clusterNameMu sync.Mutex clusterName string - scWrappersMu sync.Mutex - // The SubConns passed to the child policy are wrapped in a wrapper, to keep - // locality ID. 
But when the parent ClientConn sends updates, it's going to - // give the original SubConn, not the wrapper. But the child policies only - // know about the wrapper, so when forwarding SubConn updates, they must be - // sent for the wrappers. - // - // This keeps a map from original SubConn to wrapper, so that when - // forwarding the SubConn state update, the child policy will get the - // wrappers. - scWrappers map[balancer.SubConn]*scWrapper - // childState/drops/requestCounter keeps the state used by the most recently // generated picker. All fields can only be accessed in run(). And run() is // the only goroutine that sends picker to the parent ClientConn. All @@ -251,31 +240,19 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) return err } - // If child policy is a different type, recreate the sub-balancer. if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { - if b.childLB != nil { - b.childLB.Close() + if err := b.child.SwitchTo(bb); err != nil { + return fmt.Errorf("error switching to child of type %q: %v", newConfig.ChildPolicy.Name, err) } - b.childLB = bb.Build(b, b.bOpts) } b.config = newConfig - if b.childLB == nil { - // This is not an expected situation, and should be super rare in - // practice. - // - // When this happens, we already applied all the other configurations - // (drop/circuit breaking), but there's no child policy. This balancer - // will be stuck, and we report the error to the parent. - return fmt.Errorf("child policy is nil, this means balancer %q's Build() returned nil", newConfig.ChildPolicy.Name) - } - // Notify run() of this new config, in case drop and request counter need // update (which means a new picker needs to be generated). b.pickerUpdateCh.Put(newConfig) // Addresses and sub-balancer config are sent to sub-balancer. - return b.childLB.UpdateClientConnState(balancer.ClientConnState{ + return b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, BalancerConfig: b.config.ChildPolicy.Config, }) @@ -286,13 +263,10 @@ func (b *clusterImplBalancer) ResolverError(err error) { b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) return } - - if b.childLB != nil { - b.childLB.ResolverError(err) - } + b.child.ResolverError(err) } -func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *clusterImplBalancer) updateSubConnState(sc balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { if b.closed.HasFired() { b.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) return @@ -309,48 +283,29 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer b.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) } - b.scWrappersMu.Lock() - if scw, ok := b.scWrappers[sc]; ok { - sc = scw - if s.ConnectivityState == connectivity.Shutdown { - // Remove this SubConn from the map on Shutdown. 
- delete(b.scWrappers, scw.SubConn) - } - } - b.scWrappersMu.Unlock() - if b.childLB != nil { - b.childLB.UpdateSubConnState(sc, s) + if cb != nil { + cb(s) } } +func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, s) +} + func (b *clusterImplBalancer) Close() { b.mu.Lock() b.closed.Fire() b.mu.Unlock() - if b.childLB != nil { - b.childLB.Close() - b.childLB = nil - b.childState = balancer.State{} - } + b.child.Close() + b.childState = balancer.State{} b.pickerUpdateCh.Close() <-b.done.Done() b.logger.Infof("Shutdown") } func (b *clusterImplBalancer) ExitIdle() { - if b.childLB == nil { - return - } - if ei, ok := b.childLB.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - // Fallback for children that don't support ExitIdle -- connect to all - // SubConns. - for _, sc := range b.scWrappers { - sc.Connect() - } + b.child.ExitIdle() } // Override methods to accept updates from the child LB. @@ -407,33 +362,21 @@ func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) lID = xdsinternal.GetLocalityID(newAddrs[i]) } + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state, oldListener) } sc, err := b.ClientConn.NewSubConn(newAddrs, opts) if err != nil { return nil, err } // Wrap this SubConn in a wrapper, and add it to the map. - b.scWrappersMu.Lock() ret := &scWrapper{SubConn: sc} ret.updateLocalityID(lID) - b.scWrappers[sc] = ret - b.scWrappersMu.Unlock() return ret, nil } func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { - scw, ok := sc.(*scWrapper) - if !ok { - b.ClientConn.RemoveSubConn(sc) - return - } - // Remove the original SubConn from the parent ClientConn. - // - // Note that we don't remove this SubConn from the scWrappers map. We will - // need it to forward the final SubConn state Shutdown to the child policy. - // - // This entry is kept in the map until it's state is changes to Shutdown, - // and will be deleted in UpdateSubConnState(). 
- b.ClientConn.RemoveSubConn(scw.SubConn) + b.logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) } func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go index 6ac7a39b2b4c6..db8332b90eacf 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go @@ -22,6 +22,7 @@ package clustermanager import ( "encoding/json" "fmt" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/grpclog" @@ -46,7 +47,13 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.logger = prefixLogger(b) b.stateAggregator = newBalancerStateAggregator(cc, b.logger) b.stateAggregator.start() - b.bg = balancergroup.New(cc, opts, b.stateAggregator, b.logger) + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: opts, + StateAggregator: b.stateAggregator, + Logger: b.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) b.bg.Start() b.logger.Infof("Created") return b @@ -134,7 +141,7 @@ func (b *bal) ResolverError(err error) { } func (b *bal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - b.bg.UpdateSubConnState(sc, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } func (b *bal) Close() { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index 5eadd1ac1d0ec..6a60bc308a96c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -17,7 +17,7 @@ */ // Package clusterresolver contains the implementation of the -// xds_cluster_resolver_experimental LB policy which resolves endpoint addresses +// cluster_resolver_experimental LB policy which resolves endpoint addresses // using a list of one or more discovery mechanisms. package clusterresolver @@ -85,9 +85,10 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.logger = prefixLogger(b) b.logger.Infof("Created") - b.resourceWatcher = newResourceResolver(b) + b.resourceWatcher = newResourceResolver(b, b.logger) b.cc = &ccWrapper{ ClientConn: cc, + b: b, resourceWatcher: b.resourceWatcher, } @@ -149,13 +150,6 @@ type ccUpdate struct { err error } -// scUpdate wraps a subConn update received from gRPC. This is directly passed -// on to the child policy. 
-type scUpdate struct { - subConn balancer.SubConn - state balancer.SubConnState -} - type exitIdle struct{} // clusterResolverBalancer resolves endpoint addresses using a list of one or @@ -253,8 +247,15 @@ func (b *clusterResolverBalancer) updateChildConfig() { } b.logger.Infof("Built child policy config: %v", pretty.ToJSON(childCfg)) + endpoints := make([]resolver.Endpoint, len(addrs)) + for i, a := range addrs { + endpoints[i].Attributes = a.BalancerAttributes + endpoints[i].Addresses = []resolver.Address{a} + endpoints[i].Addresses[0].BalancerAttributes = nil + } if err := b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ + Endpoints: endpoints, Addresses: addrs, ServiceConfig: b.configRaw, Attributes: b.attrsWithClient, @@ -279,7 +280,7 @@ func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bo // EDS resource was removed. No action needs to be taken for this, and we // should continue watching the same EDS resource. if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { - b.resourceWatcher.stop() + b.resourceWatcher.stop(false) } if b.child != nil { @@ -306,14 +307,6 @@ func (b *clusterResolverBalancer) run() { switch update := u.(type) { case *ccUpdate: b.handleClientConnUpdate(update) - case *scUpdate: - // SubConn updates are simply handed over to the underlying - // child balancer. - if b.child == nil { - b.logger.Errorf("Received a SubConn update {%+v} with no child policy", update) - break - } - b.child.UpdateSubConnState(update.subConn, update.state) case exitIdle: if b.child == nil { b.logger.Errorf("xds: received ExitIdle with no child balancer") @@ -333,7 +326,7 @@ func (b *clusterResolverBalancer) run() { // Close results in stopping the endpoint resolvers and closing the // underlying child policy and is the only way to exit this goroutine. case <-b.closed.Done(): - b.resourceWatcher.stop() + b.resourceWatcher.stop(true) if b.child != nil { b.child.Close() @@ -380,11 +373,7 @@ func (b *clusterResolverBalancer) ResolverError(err error) { // UpdateSubConnState handles subConn updates from gRPC. func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if b.closed.HasFired() { - b.logger.Warningf("Received subConn update {%v, %v} after close", sc, state) - return - } - b.updateCh.Put(&scUpdate{subConn: sc, state: state}) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } // Close closes the cdsBalancer and the underlying child balancer. @@ -398,9 +387,13 @@ func (b *clusterResolverBalancer) ExitIdle() { } // ccWrapper overrides ResolveNow(), so that re-resolution from the child -// policies will trigger the DNS resolver in cluster_resolver balancer. +// policies will trigger the DNS resolver in cluster_resolver balancer. It +// also intercepts NewSubConn calls in case children don't set the +// StateListener, to allow redirection to happen via this cluster_resolver +// balancer. 
type ccWrapper struct { balancer.ClientConn + b *clusterResolverBalancer resourceWatcher *resourceResolver } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go index 4b83dfb2bfa07..d1fb717d878bf 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go @@ -190,7 +190,17 @@ func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.Endpoint }) } - priorities := groupLocalitiesByPriority(edsResp.Localities) + // Localities of length 0 is triggered by an NACK or resource-not-found + // error before update, or a empty localities list in a update. In either + // case want to create a priority, and send down empty address list, causing + // TF for that priority. "If any discovery mechanism instance experiences an + // error retrieving data, and it has not previously reported any results, it + // should report a result that is a single priority with no endpoints." - + // A37 + priorities := [][]xdsresource.Locality{{}} + if len(edsResp.Localities) != 0 { + priorities = groupLocalitiesByPriority(edsResp.Localities) + } retNames := g.generate(priorities) retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) var retAddrs []resolver.Address diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index 580734a021549..b9a81e9ba8293 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -19,8 +19,11 @@ package clusterresolver import ( + "context" "sync" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -37,7 +40,6 @@ type resourceUpdate struct { // from underlying concrete resolvers. type topLevelResolver interface { onUpdate() - onError(error) } // endpointsResolver wraps the functionality to resolve a given resource name to @@ -52,7 +54,7 @@ type endpointsResolver interface { // The second return result indicates whether the resolver was able to // successfully resolve the resource name to endpoints. If set to false, the // first return result is invalid and must not be used. - lastUpdate() (interface{}, bool) + lastUpdate() (any, bool) // resolverNow triggers re-resolution of the resource. resolveNow() @@ -83,8 +85,11 @@ type discoveryMechanismAndResolver struct { } type resourceResolver struct { - parent *clusterResolverBalancer - updateChannel chan *resourceUpdate + parent *clusterResolverBalancer + logger *grpclog.PrefixLogger + updateChannel chan *resourceUpdate + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc // mu protects the slice and map, and content of the resolvers in the slice. 
mu sync.Mutex @@ -104,12 +109,17 @@ type resourceResolver struct { childNameGeneratorSeqID uint64 } -func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver { - return &resourceResolver{ +func newResourceResolver(parent *clusterResolverBalancer, logger *grpclog.PrefixLogger) *resourceResolver { + rr := &resourceResolver{ parent: parent, + logger: logger, updateChannel: make(chan *resourceUpdate, 1), childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), } + ctx, cancel := context.WithCancel(context.Background()) + rr.serializer = grpcsync.NewCallbackSerializer(ctx) + rr.serializerCancel = cancel + return rr } func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { @@ -172,9 +182,9 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { var resolver endpointsResolver switch dm.Type { case DiscoveryMechanismTypeEDS: - resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr) + resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr, rr.logger) case DiscoveryMechanismTypeLogicalDNS: - resolver = newDNSResolver(dmKey.name, rr) + resolver = newDNSResolver(dmKey.name, rr, rr.logger) } dmAndResolver = discoveryMechanismAndResolver{ dm: dm, @@ -208,8 +218,9 @@ func (rr *resourceResolver) resolveNow() { } } -func (rr *resourceResolver) stop() { +func (rr *resourceResolver) stop(closing bool) { rr.mu.Lock() + // Save the previous childrenMap to stop the children outside the mutex, // and reinitialize the map. We only need to reinitialize to allow for the // policy to be reused if the resource comes back. In practice, this does @@ -220,12 +231,18 @@ func (rr *resourceResolver) stop() { rr.childrenMap = make(map[discoveryMechanismKey]discoveryMechanismAndResolver) rr.mechanisms = nil rr.children = nil + rr.mu.Unlock() for _, r := range cm { r.r.stop() } + if closing { + rr.serializerCancel() + <-rr.serializer.Done() + } + // stop() is called when the LB policy is closed or when the underlying // cluster resource is removed by the management server. In the latter case, // an empty config update needs to be pushed to the child policy to ensure @@ -270,15 +287,9 @@ func (rr *resourceResolver) generateLocked() { } func (rr *resourceResolver) onUpdate() { - rr.mu.Lock() - rr.generateLocked() - rr.mu.Unlock() -} - -func (rr *resourceResolver) onError(err error) { - select { - case <-rr.updateChannel: - default: - } - rr.updateChannel <- &resourceUpdate{err: err} + rr.serializer.Schedule(func(context.Context) { + rr.mu.Lock() + rr.generateLocked() + rr.mu.Unlock() + }) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 06af9cc6df327..9052190b0ff08 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -23,6 +23,8 @@ import ( "net/url" "sync" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -42,6 +44,7 @@ type dnsDiscoveryMechanism struct { target string topLevelResolver topLevelResolver dnsR resolver.Resolver + logger *grpclog.PrefixLogger mu sync.Mutex addrs []string @@ -64,27 +67,36 @@ type dnsDiscoveryMechanism struct { // // The `dnsR` field is unset if we run into erros in this function. 
Therefore, a // nil check is required wherever we access that field. -func newDNSResolver(target string, topLevelResolver topLevelResolver) *dnsDiscoveryMechanism { +func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism { ret := &dnsDiscoveryMechanism{ target: target, topLevelResolver: topLevelResolver, + logger: logger, } u, err := url.Parse("dns:///" + target) if err != nil { - topLevelResolver.onError(fmt.Errorf("failed to parse dns hostname %q in clusterresolver LB policy", target)) + if ret.logger.V(2) { + ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target) + } + ret.updateReceived = true + ret.topLevelResolver.onUpdate() return ret } - r, err := newDNS(resolver.Target{Scheme: "dns", URL: *u}, ret, resolver.BuildOptions{}) + r, err := newDNS(resolver.Target{URL: *u}, ret, resolver.BuildOptions{}) if err != nil { - topLevelResolver.onError(fmt.Errorf("failed to build DNS resolver for target %q: %v", target, err)) + if ret.logger.V(2) { + ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err) + } + ret.updateReceived = true + ret.topLevelResolver.onUpdate() return ret } ret.dnsR = r return ret } -func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) { +func (dr *dnsDiscoveryMechanism) lastUpdate() (any, bool) { dr.mu.Lock() defer dr.mu.Unlock() @@ -116,10 +128,26 @@ func (dr *dnsDiscoveryMechanism) stop() { // updates from the real DNS resolver. func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported an update: %s", dr.target, pretty.ToJSON(state)) + } + dr.mu.Lock() - addrs := make([]string, len(state.Addresses)) - for i, a := range state.Addresses { - addrs[i] = a.Addr + var addrs []string + if len(state.Endpoints) > 0 { + // Assume 1 address per endpoint, which is how DNS is expected to + // behave. The slice will grow as needed, however. + addrs = make([]string, 0, len(state.Endpoints)) + for _, e := range state.Endpoints { + for _, a := range e.Addresses { + addrs = append(addrs, a.Addr) + } + } + } else { + addrs = make([]string, len(state.Addresses)) + for i, a := range state.Addresses { + addrs[i] = a.Addr + } } dr.addrs = addrs dr.updateReceived = true @@ -130,7 +158,25 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { } func (dr *dnsDiscoveryMechanism) ReportError(err error) { - dr.topLevelResolver.onError(err) + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported error: %v", dr.target, err) + } + + dr.mu.Lock() + // If a previous good update was received, suppress the error and continue + // using the previous update. If RPCs were succeeding prior to this, they + // will continue to do so. Also suppress errors if we previously received an + // error, since there will be no downstream effects of propagating this + // error. 
+ if dr.updateReceived { + dr.mu.Unlock() + return + } + dr.addrs = nil + dr.updateReceived = true + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate() } func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go index 2517cf49159cd..3d0ec356e93a9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -21,32 +21,30 @@ package clusterresolver import ( "sync" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -type edsResourceWatcher interface { - WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() -} - type edsDiscoveryMechanism struct { + nameToWatch string cancelWatch func() topLevelResolver topLevelResolver stopped *grpcsync.Event + logger *grpclog.PrefixLogger - mu sync.Mutex - update xdsresource.EndpointsUpdate - updateReceived bool + mu sync.Mutex + update *xdsresource.EndpointsUpdate // Nil indicates no update received so far. } -func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { +func (er *edsDiscoveryMechanism) lastUpdate() (any, bool) { er.mu.Lock() defer er.mu.Unlock() - if !er.updateReceived { + if er.update == nil { return nil, false } - return er.update, true + return *er.update, true } func (er *edsDiscoveryMechanism) resolveNow() { @@ -64,31 +62,79 @@ func (er *edsDiscoveryMechanism) stop() { er.cancelWatch() } -func (er *edsDiscoveryMechanism) handleEndpointsUpdate(update xdsresource.EndpointsUpdate, err error) { +// newEDSResolver returns an implementation of the endpointsResolver interface +// that uses EDS to resolve the given name to endpoints. +func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{ + nameToWatch: nameToWatch, + topLevelResolver: topLevelResolver, + logger: logger, + stopped: grpcsync.NewEvent(), + } + ret.cancelWatch = xdsresource.WatchEndpoints(producer, nameToWatch, ret) + return ret +} + +// OnUpdate is invoked to report an update for the resource being watched. +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData) { if er.stopped.HasFired() { return } - if err != nil { - er.topLevelResolver.onError(err) + er.mu.Lock() + er.update = &update.Resource + er.mu.Unlock() + + er.topLevelResolver.onUpdate() +} + +func (er *edsDiscoveryMechanism) OnError(err error) { + if er.stopped.HasFired() { return } + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported error: %v", er.nameToWatch, err) + } + er.mu.Lock() - er.update = update - er.updateReceived = true + if er.update != nil { + // Continue using a previously received good configuration if one + // exists. + er.mu.Unlock() + return + } + + // Else report an empty update that would result in no priority child being + // created for this discovery mechanism. 
This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. + er.update = &xdsresource.EndpointsUpdate{} er.mu.Unlock() er.topLevelResolver.onUpdate() } -// newEDSResolver returns an implementation of the endpointsResolver interface -// that uses EDS to resolve the given name to endpoints. -func newEDSResolver(nameToWatch string, watcher edsResourceWatcher, topLevelResolver topLevelResolver) *edsDiscoveryMechanism { - ret := &edsDiscoveryMechanism{ - topLevelResolver: topLevelResolver, - stopped: grpcsync.NewEvent(), +func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { + if er.stopped.HasFired() { + return } - ret.cancelWatch = watcher.WatchEndpoints(nameToWatch, ret.handleEndpointsUpdate) - return ret + + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported resource-does-not-exist error", er.nameToWatch) + } + + // Report an empty update that would result in no priority child being + // created for this discovery mechanism. This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. + er.mu.Lock() + er.update = &xdsresource.EndpointsUpdate{} + er.mu.Unlock() + + er.topLevelResolver.onUpdate() } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go index eaf4f7fc9ab78..965297a73dbc3 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -345,7 +345,7 @@ func (b *outlierDetectionBalancer) ResolverError(err error) { b.child.ResolverError(err) } -func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() scw, ok := b.scWrappers[sc] @@ -364,6 +364,10 @@ func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state }) } +func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + func (b *outlierDetectionBalancer) Close() { b.closed.Fire() <-b.done.Done() @@ -466,6 +470,9 @@ func (b *outlierDetectionBalancer) UpdateState(s balancer.State) { } func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state) } sc, err := b.cc.NewSubConn(addrs, opts) if err != nil { return nil, err @@ -474,6 +481,7 @@ func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts bal SubConn: sc, addresses: addrs, scUpdateCh: b.scUpdateCh, + listener: oldListener, } b.mu.Lock() defer b.mu.Unlock() @@ -494,14 +502,7 @@ func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts bal } 
func (b *outlierDetectionBalancer) RemoveSubConn(sc balancer.SubConn) { - scw, ok := sc.(*subConnWrapper) - if !ok { // Shouldn't happen - return - } - // Remove the wrapped SubConn from the parent Client Conn. We don't remove - // from map entry until we get a Shutdown state for the SubConn, as we need - // that data to forward that state down. - b.cc.RemoveSubConn(scw.SubConn) + b.logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) } // appendIfPresent appends the scw to the address, if the address is present in @@ -614,9 +615,11 @@ func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { scw := u.scw scw.latestState = u.state if !scw.ejected { - b.childMu.Lock() - b.child.UpdateSubConnState(scw, u.state) - b.childMu.Unlock() + if scw.listener != nil { + b.childMu.Lock() + scw.listener(u.state) + b.childMu.Unlock() + } } } @@ -633,9 +636,11 @@ func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) { ConnectivityState: connectivity.TransientFailure, } } - b.childMu.Lock() - b.child.UpdateSubConnState(scw, stateToUpdate) - b.childMu.Unlock() + if scw.listener != nil { + b.childMu.Lock() + scw.listener(stateToUpdate) + b.childMu.Unlock() + } } // handleChildStateUpdate forwards the picker update wrapped in a wrapped picker diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go index 71a996f29ae06..0fa422d8f262e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -31,6 +31,7 @@ import ( // whether or not this SubConn is ejected. type subConnWrapper struct { balancer.SubConn + listener func(balancer.SubConnState) // addressInfo is a pointer to the subConnWrapper's corresponding address // map entry, if the map entry exists. diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index 40c047d558b77..7efbe402a8e5c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -45,6 +45,10 @@ import ( // Name is the name of the priority balancer. const Name = "priority_experimental" +// DefaultSubBalancerCloseTimeout is defined as a variable instead of const for +// testing. 
+var DefaultSubBalancerCloseTimeout = 15 * time.Minute + func init() { balancer.Register(bb{}) } @@ -60,7 +64,13 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } b.logger = prefixLogger(b) - b.bg = balancergroup.New(cc, bOpts, b, b.logger) + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: bOpts, + StateAggregator: b, + Logger: b.logger, + SubBalancerCloseTimeout: DefaultSubBalancerCloseTimeout, + }) b.bg.Start() go b.run() b.logger.Infof("Created") @@ -200,7 +210,7 @@ func (b *priorityBalancer) ResolverError(err error) { } func (b *priorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - b.bg.UpdateSubConnState(sc, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } func (b *priorityBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go index ec3b5605690d1..b450716fa0f05 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go @@ -29,12 +29,17 @@ import ( ) type picker struct { - ring *ring - logger *grpclog.PrefixLogger + ring *ring + logger *grpclog.PrefixLogger + subConnStates map[*subConn]connectivity.State } func newPicker(ring *ring, logger *grpclog.PrefixLogger) *picker { - return &picker{ring: ring, logger: logger} + states := make(map[*subConn]connectivity.State) + for _, e := range ring.items { + states[e.sc] = e.sc.effectiveState() + } + return &picker{ring: ring, logger: logger, subConnStates: states} } // handleRICSResult is the return type of handleRICS. It's needed to wrap the @@ -54,7 +59,7 @@ type handleRICSResult struct { // or Shutdown. If it's true, the PickResult and error should be returned from // Pick() as is. func (p *picker) handleRICS(e *ringEntry) (handleRICSResult, bool) { - switch state := e.sc.effectiveState(); state { + switch state := p.subConnStates[e.sc]; state { case connectivity.Ready: return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true case connectivity.Idle: @@ -118,7 +123,7 @@ func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, erro // but don't not trigger Connect() on the other SubConns. 
var firstNonFailedFound bool for ee := nextSkippingDuplicates(p.ring, e2); ee != e; ee = nextSkippingDuplicates(p.ring, ee) { - scState := ee.sc.effectiveState() + scState := p.subConnStates[ee.sc] if scState == connectivity.Ready { return balancer.PickResult{SubConn: ee.sc.sc}, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go index b9caefa63a2d1..e63c6f653904a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -218,7 +218,12 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { addrsSet.Set(addr, true) newWeight := getWeightAttribute(addr) if val, ok := b.subConns.Get(addr); !ok { - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: true, + StateListener: func(state balancer.SubConnState) { b.updateSubConnState(sc, state) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { b.logger.Warningf("Failed to create new SubConn: %v", err) continue @@ -252,11 +257,11 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { if _, ok := addrsSet.Get(addr); !ok { v, _ := b.subConns.Get(addr) scInfo := v.(*subConn) - b.cc.RemoveSubConn(scInfo.sc) + scInfo.sc.Shutdown() b.subConns.Delete(addr) addrsUpdated = true // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } return addrsUpdated @@ -321,7 +326,11 @@ func (b *ringhashBalancer) ResolverError(err error) { }) } -// UpdateSubConnState updates the per-SubConn state stored in the ring, and also +func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// updateSubConnState updates the per-SubConn state stored in the ring, and also // the aggregated state. // // It triggers an update to cc when: @@ -332,7 +341,7 @@ func (b *ringhashBalancer) ResolverError(err error) { // - the aggregated state is changed // - the same picker will be sent again, but this update may trigger a re-pick // for some RPCs. -func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +func (b *ringhashBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { b.logger.Infof("Handle SubConn state change: %p, %v", sc, s) @@ -347,37 +356,23 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance newSCState := scs.effectiveState() b.logger.Infof("SubConn's effective old state was: %v, new state is %v", oldSCState, newSCState) - var sendUpdate bool - oldBalancerState := b.state b.state = b.csEvltr.recordTransition(oldSCState, newSCState) - if oldBalancerState != b.state { - sendUpdate = true - } switch s { - case connectivity.Idle: - // No need to send an update. No queued RPC can be unblocked. If the - // overall state changed because of this, sendUpdate is already true. - case connectivity.Connecting: - // No need to send an update. No queued RPC can be unblocked. If the - // overall state changed because of this, sendUpdate is already true. 
- case connectivity.Ready: - // We need to regenerate the picker even if the ring has not changed - // because we could be moving from TRANSIENT_FAILURE to READY, in which - // case, we need to update the error picker returned earlier. - b.regeneratePicker() - sendUpdate = true case connectivity.TransientFailure: // Save error to be reported via picker. b.connErr = state.ConnectionError - b.regeneratePicker() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) } - if sendUpdate { + if oldSCState != newSCState { + // Because the picker caches the state of the subconns, we always + // regenerate and update the picker when the effective SubConn state + // changes. + b.regeneratePicker() b.logger.Infof("Pushing new state %v and picker %p", b.state, b.picker) b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go index 4df2e4ed0086a..943ee7806ba18 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go @@ -103,7 +103,7 @@ func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, err type attributeKey struct{} // Equal allows the values to be compared by Attributes.Equal. -func (a AddrInfo) Equal(o interface{}) bool { +func (a AddrInfo) Equal(o any) bool { oa, ok := o.(AddrInfo) return ok && oa.LocalityWeight == a.LocalityWeight } @@ -192,8 +192,8 @@ func (b *wrrLocalityBalancer) ResolverError(err error) { b.child.ResolverError(err) } -func (b *wrrLocalityBalancer) UpdateSubConnState(sc balancer.SubConn, scState balancer.SubConnState) { - b.child.UpdateSubConnState(sc, scState) +func (b *wrrLocalityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } func (b *wrrLocalityBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go index b95a101116ed0..8fcb83cdbb1ea 100644 --- a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go @@ -26,7 +26,7 @@ import ( // BalancerConfig is the Go Native JSON representation of a balancer // configuration. -type BalancerConfig []map[string]interface{} +type BalancerConfig []map[string]any // ClusterSpecifier defines the parsing functionality of a Cluster Specifier. 
type ClusterSpecifier interface { diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go index 725b50a76a839..aa329d13ac303 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -297,5 +297,5 @@ func (*okStream) Header() (metadata.MD, error) { return nil, nil } func (*okStream) Trailer() metadata.MD { return nil } func (*okStream) CloseSend() error { return nil } func (o *okStream) Context() context.Context { return o.ctx } -func (*okStream) SendMsg(m interface{}) error { return io.EOF } -func (*okStream) RecvMsg(m interface{}) error { return io.EOF } +func (*okStream) SendMsg(m any) error { return io.EOF } +func (*okStream) RecvMsg(m any) error { return io.EOF } diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go index ba6fa3d788075..fda4c7f561065 100644 --- a/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -47,7 +47,7 @@ func (l LocalityID) ToString() (string, error) { } // Equal allows the values to be compared by Attributes.Equal. -func (l LocalityID) Equal(o interface{}) bool { +func (l LocalityID) Equal(o any) bool { ol, ok := o.(LocalityID) if !ok { return false @@ -82,4 +82,4 @@ func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { } // ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. -var ResourceTypeMapForTesting map[string]interface{} +var ResourceTypeMapForTesting map[string]any diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index d1dd79354ae0c..02470ddca5e45 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -39,7 +39,6 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clustermanager" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -54,10 +53,10 @@ type serviceConfig struct { LoadBalancingConfig balancerConfig `json:"loadBalancingConfig"` } -type balancerConfig []map[string]interface{} +type balancerConfig []map[string]any -func newBalancerConfig(name string, config interface{}) balancerConfig { - return []map[string]interface{}{{name: config}} +func newBalancerConfig(name string, config any) balancerConfig { + return []map[string]any{{name: config}} } type cdsBalancerConfig struct { @@ -121,6 +120,7 @@ type routeCluster struct { type route struct { m *xdsresource.CompositeMatcher // converted from route matchers + actionType xdsresource.RouteActionType // holds route action type clusters wrr.WRR // holds *routeCluster entries maxStreamDuration time.Duration // map from filter name to its config @@ -142,6 +142,7 @@ type configSelector struct { } var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found") +var errUnsupportedClientRouteAction = status.Errorf(codes.Unavailable, "matched route does not have a supported route action type") func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { if cs == nil { @@ -155,10 +156,15 @@ 
func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP break } } + if rt == nil || rt.clusters == nil { return nil, errNoMatchedRouteFound } + if rt.actionType != xdsresource.RouteActionRoute { + return nil, errUnsupportedClientRouteAction + } + cluster, ok := rt.clusters.Next().(*routeCluster) if !ok { return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster) @@ -280,11 +286,6 @@ func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (ires } interceptors := make([]iresolver.ClientInterceptor, 0, len(cs.httpFilterConfig)) for _, filter := range cs.httpFilterConfig { - if router.IsRouterFilter(filter.Filter) { - // Ignore any filters after the router filter. The router itself - // is currently a nop. - return &interceptorList{interceptors: interceptors}, nil - } override := cluster.httpFilterConfigOverride[filter.Name] // cluster is highest priority if override == nil { override = rt.httpFilterConfigOverride[filter.Name] // route is second priority @@ -305,7 +306,7 @@ func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (ires interceptors = append(interceptors, i) } } - return nil, fmt.Errorf("error in xds config: no router filter present") + return &interceptorList{interceptors: interceptors}, nil } // stop decrements refs of all clusters referenced by this config selector. @@ -381,6 +382,7 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro if err != nil { return nil, err } + cs.routes[i].actionType = rt.ActionType if rt.MaxStreamDuration == nil { cs.routes[i].maxStreamDuration = su.ldsConfig.maxStreamDuration } else { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index 61adf794e9b7b..6ad61dae4ae4f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -83,6 +83,7 @@ type authority struct { // actual state of the resource. 
resourcesMu sync.Mutex resources map[xdsresource.Type]map[string]*resourceState + closed bool } // authorityArgs is a convenience struct to wrap arguments required to create a @@ -443,6 +444,10 @@ func (a *authority) unrefLocked() int { func (a *authority) close() { a.transport.Close() + + a.resourcesMu.Lock() + a.closed = true + a.resourcesMu.Unlock() } func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { @@ -507,10 +512,14 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w } func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName string, state *resourceState) { - a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) a.resourcesMu.Lock() defer a.resourcesMu.Unlock() + if a.closed { + return + } + a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) + switch state.wState { case watchStateRequested: // This is the only state where we need to handle the timer expiry by diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index cc39fb2e4d16e..44f6d3bc0a1cf 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -33,7 +33,6 @@ type XDSClient interface { WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() - WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() // WatchResource uses xDS to discover the resource associated with the // provided resource name. 
The resource type implementation determines how diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go index 2531b39472f52..925566cf44f30 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -109,7 +110,7 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth serializer: c.serializer, resourceTypeGetter: c.resourceTypes.get, watchExpiryTimeout: c.watchExpiryTimeout, - logger: c.logger, + logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI)), }) if err != nil { return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go index 3c3adad5341c0..e503349dbc29a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -112,37 +112,6 @@ func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.Clust return xdsresource.WatchCluster(c, resourceName, watcher) } -// This is only required temporarily, while we modify the -// clientImpl.WatchEndpoints API to be implemented via the wrapper -// WatchEndpoints() API which calls the WatchResource() API. -type endpointsWatcher struct { - resourceName string - cb func(xdsresource.EndpointsUpdate, error) -} - -func (c *endpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) { - c.cb(update.Resource, nil) -} - -func (c *endpointsWatcher) OnError(err error) { - c.cb(xdsresource.EndpointsUpdate{}, err) -} - -func (c *endpointsWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Endpoints not found in received response", c.resourceName) - c.cb(xdsresource.EndpointsUpdate{}, err) -} - -// WatchEndpoints uses EDS to discover information about the -// ClusterLoadAssignment resource identified by resourceName. -// -// WatchEndpoints can be called multiple times, with same or different -// clusterNames. Each call will start an independent watcher for the resource. -func (c *clientImpl) WatchEndpoints(resourceName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { - watcher := &endpointsWatcher{resourceName: resourceName, cb: cb} - return xdsresource.WatchEndpoints(c, resourceName, watcher) -} - // WatchResource uses xDS to discover the resource associated with the provided // resource name. The resource type implementation determines how xDS requests // are sent out and how responses are deserialized and validated. 
Upon receipt diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go index 551a5147b6bd1..1f266ae20185b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go @@ -277,7 +277,7 @@ func (ls *perClusterStore) stats() *Data { } sd := newData(ls.cluster, ls.service) - ls.drops.Range(func(key, val interface{}) bool { + ls.drops.Range(func(key, val any) bool { d := atomic.SwapUint64(val.(*uint64), 0) if d == 0 { return true @@ -291,7 +291,7 @@ func (ls *perClusterStore) stats() *Data { } return true }) - ls.localityRPCCount.Range(func(key, val interface{}) bool { + ls.localityRPCCount.Range(func(key, val any) bool { countData := val.(*rpcCountData) succeeded := countData.loadAndClearSucceeded() inProgress := countData.loadInProgress() @@ -308,7 +308,7 @@ func (ls *perClusterStore) stats() *Data { }, LoadStats: make(map[string]ServerLoadData), } - countData.serverLoads.Range(func(key, val interface{}) bool { + countData.serverLoads.Range(func(key, val any) bool { sum, count := val.(*rpcLoadData).loadAndClear() if count == 0 { return true diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go index e28ea0d04103f..2269cb293da9a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go @@ -25,10 +25,16 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const prefix = "[xds-client %p] " - var logger = grpclog.Component("xds") func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) + return internalgrpclog.NewPrefixLogger(logger, clientPrefix(p)) +} + +func clientPrefix(p *clientImpl) string { + return fmt.Sprintf("[xds-client %p] ", p) +} + +func authorityPrefix(p *clientImpl, serverURI string) string { + return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI) } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go index c5d5afe4ebdc4..5bf70751ed80d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -30,6 +30,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/leastrequest" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/internal/envconfig" @@ -41,6 +42,7 @@ import ( v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" v3ringhashpb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" @@ -53,13 +55,15 @@ func init() { xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst", convertPickFirstProtoToServiceConfig) xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", convertRoundRobinProtoToServiceConfig) xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", convertWRRLocalityProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest", convertLeastRequestProtoToServiceConfig) xdslbregistry.Register("type.googleapis.com/udpa.type.v1.TypedStruct", convertV1TypedStructToServiceConfig) xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) } const ( - defaultRingHashMinSize = 1024 - defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + defaultLeastRequestChoiceCount = 2 ) func convertRingHashProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { @@ -177,6 +181,29 @@ func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, _ int) (json return makeBalancerConfigJSON(weightedroundrobin.Name, lbCfgJSON), nil } +func convertLeastRequestProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + if !envconfig.LeastRequestLB { + return nil, nil + } + lrProto := &v3leastrequestpb.LeastRequest{} + if err := proto.Unmarshal(rawProto, lrProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + // "The configuration for the Least Request LB policy is the + // least_request_lb_config field. The field is optional; if not present, + // defaults will be assumed for all of its values." - A48 + choiceCount := uint32(defaultLeastRequestChoiceCount) + if cc := lrProto.GetChoiceCount(); cc != nil { + choiceCount = cc.GetValue() + } + lrCfg := &leastrequest.LBConfig{ChoiceCount: choiceCount} + js, err := json.Marshal(lrCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", lrCfg, err) + } + return makeBalancerConfigJSON(leastrequest.Name, js), nil +} + func convertV1TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { tsProto := &v1xdsudpatypepb.TypedStruct{} if err := proto.Unmarshal(rawProto, tsProto); err != nil { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go index 00ef9310481a8..7bac4469b78bd 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go @@ -53,7 +53,7 @@ func (e *xdsClientError) Error() string { // NewErrorf creates an xds client error. The callbacks are called with this // error, to pass additional information about the error. 
-func NewErrorf(t ErrorType, format string, args ...interface{}) error { +func NewErrorf(t ErrorType, format string, args ...any) error { return &xdsClientError{t: t, desc: fmt.Sprintf(format, args...)} } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go index f67f0ea153252..7cd64201dae19 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -32,7 +32,7 @@ import ( ) func init() { - internal.ResourceTypeMapForTesting = make(map[string]interface{}) + internal.ResourceTypeMapForTesting = make(map[string]any) internal.ResourceTypeMapForTesting[version.V3ListenerURL] = listenerType internal.ResourceTypeMapForTesting[version.V3RouteConfigURL] = routeConfigType internal.ResourceTypeMapForTesting[version.V3ClusterURL] = clusterType diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go index 0fb3f274ed460..35cfa9ee76789 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go @@ -30,7 +30,7 @@ import ( // context/logic available at the xdsClient layer. Since these validation are // performed on internal update structs, they can be shared between different // API clients. -type UpdateValidatorFunc func(interface{}) error +type UpdateValidatorFunc func(any) error // UpdateMetadata contains the metadata for each update, including timestamp, // raw message, and so on. diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go index ec70f32ca4365..1254d250c99b0 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go @@ -73,11 +73,3 @@ type EndpointsUpdate struct { // Raw is the resource from the xds response. Raw *anypb.Any } - -// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. 
-type EndpointsUpdateErrTuple struct { - Update EndpointsUpdate - Err error -} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 9f8530111a735..abf95d2a40d6c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -23,6 +23,7 @@ import ( "fmt" "net" "strconv" + "strings" "time" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" @@ -75,6 +76,8 @@ const ( defaultRingHashMinSize = 1024 defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M + + defaultLeastRequestChoiceCount = 2 ) func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { @@ -103,6 +106,26 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu rhLBCfg := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) lbPolicy = []byte(fmt.Sprintf(`[{"ring_hash_experimental": %s}]`, rhLBCfg)) + case v3clusterpb.Cluster_LEAST_REQUEST: + if !envconfig.LeastRequestLB { + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + + // "The configuration for the Least Request LB policy is the + // least_request_lb_config field. The field is optional; if not present, + // defaults will be assumed for all of its values." - A48 + lr := cluster.GetLeastRequestLbConfig() + var choiceCount uint32 = defaultLeastRequestChoiceCount + if cc := lr.GetChoiceCount(); cc != nil { + choiceCount = cc.GetValue() + } + // "If choice_count < 2, the config will be rejected." - A48 + if choiceCount < 2 { + return ClusterUpdate{}, fmt.Errorf("Cluster_LeastRequestLbConfig.ChoiceCount must be >= 2, got: %v", choiceCount) + } + + lrLBCfg := []byte(fmt.Sprintf("{\"choiceCount\": %d}", choiceCount)) + lbPolicy = []byte(fmt.Sprintf(`[{"least_request_experimental": %s}]`, lrLBCfg)) default: return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } @@ -173,6 +196,9 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } ret.ClusterType = ClusterTypeEDS ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + if strings.HasPrefix(ret.ClusterName, "xdstp:") && ret.EDSServiceName == "" { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS service name is not set with a new-style cluster name: %+v", cluster) + } return ret, nil case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: if !envconfig.XDSAggregateAndDNS { diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go index 55b678bb78a0c..fe2138c8bc245 100644 --- a/vendor/google.golang.org/grpc/xds/server.go +++ b/vendor/google.golang.org/grpc/xds/server.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "net" - "sync" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -64,7 +63,7 @@ var ( // grpcServer contains methods from grpc.Server which are used by the // GRPCServer type here. This is useful for overriding in unit tests. 
type grpcServer interface { - RegisterService(*grpc.ServiceDesc, interface{}) + RegisterService(*grpc.ServiceDesc, any) Serve(net.Listener) error Stop() GracefulStop() @@ -76,16 +75,11 @@ type grpcServer interface { // grpc.ServiceRegistrar interface and can be passed to service registration // functions in IDL generated code. type GRPCServer struct { - gs grpcServer - quit *grpcsync.Event - logger *internalgrpclog.PrefixLogger - xdsCredsInUse bool - opts *serverOptions - - // clientMu is used only in initXDSClient(), which is called at the - // beginning of Serve(), where we have to decide if we have to create a - // client or use an existing one. - clientMu sync.Mutex + gs grpcServer + quit *grpcsync.Event + logger *internalgrpclog.PrefixLogger + xdsCredsInUse bool + opts *serverOptions xdsC xdsclient.XDSClient xdsClientClose func() } @@ -93,7 +87,7 @@ type GRPCServer struct { // NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. // The underlying gRPC server has no service registered and has not started to // accept requests yet. -func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { +func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { newOpts := []grpc.ServerOption{ grpc.ChainUnaryInterceptor(xdsUnaryInterceptor), grpc.ChainStreamInterceptor(xdsStreamInterceptor), @@ -103,8 +97,6 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { gs: newGRPCServer(newOpts...), quit: grpcsync.NewEvent(), } - s.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, s)) - s.logger.Infof("Created xds.GRPCServer") s.handleServerOptions(opts) // We type assert our underlying gRPC server to the real grpc.Server here @@ -119,8 +111,48 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { } } + // Initializing the xDS client upfront (instead of at serving time) + // simplifies the code by eliminating the need for a mutex to protect the + // xdsC and xdsClientClose fields. + newXDSClient := newXDSClient + if s.opts.bootstrapContentsForTesting != nil { + // Bootstrap file contents may be specified as a server option for tests. + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return xdsclient.NewWithBootstrapContentsForTesting(s.opts.bootstrapContentsForTesting) + } + } + xdsClient, xdsClientClose, err := newXDSClient() + if err != nil { + return nil, fmt.Errorf("xDS client creation failed: %v", err) + } + + // Validate the bootstrap configuration for server specific fields. + + // Listener resource name template is mandatory on the server side. + cfg := xdsClient.BootstrapConfig() + if cfg.ServerListenerResourceNameTemplate == "" { + xdsClientClose() + return nil, errors.New("missing server_listener_resource_name_template in the bootstrap configuration") + } + + // If xds credentials were specified by the user, but bootstrap configs do + // not contain any certificate provider configuration, it is better to fail + // right now rather than failing when attempting to create certificate + // providers after receiving an LDS response with security configuration. 
+ if s.xdsCredsInUse { + if len(cfg.CertProviderConfigs) == 0 { + xdsClientClose() + return nil, fmt.Errorf("xds credentials are passed to the user, but certificate_providers config is missing in the bootstrap configuration") + } + } + s.xdsC = xdsClient + s.xdsClientClose = xdsClientClose + + s.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, s)) + s.logger.Infof("Created xds.GRPCServer") s.logger.Infof("xDS credentials in use: %v", s.xdsCredsInUse) - return s + + return s, nil } // handleServerOptions iterates through the list of server options passed in by @@ -159,7 +191,7 @@ func (s *GRPCServer) loggingServerModeChangeCallback(addr net.Addr, args Serving // RegisterService registers a service and its implementation to the underlying // gRPC server. It is called from the IDL generated code. This must be called // before invoking Serve. -func (s *GRPCServer) RegisterService(sd *grpc.ServiceDesc, ss interface{}) { +func (s *GRPCServer) RegisterService(sd *grpc.ServiceDesc, ss any) { s.gs.RegisterService(sd, ss) } @@ -169,32 +201,6 @@ func (s *GRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { return s.gs.GetServiceInfo() } -// initXDSClient creates a new xdsClient if there is no existing one available. -func (s *GRPCServer) initXDSClient() error { - s.clientMu.Lock() - defer s.clientMu.Unlock() - - if s.xdsC != nil { - return nil - } - - newXDSClient := newXDSClient - if s.opts.bootstrapContentsForTesting != nil { - // Bootstrap file contents may be specified as a server option for tests. - newXDSClient = func() (xdsclient.XDSClient, func(), error) { - return xdsclient.NewWithBootstrapContentsForTesting(s.opts.bootstrapContentsForTesting) - } - } - - client, close, err := newXDSClient() - if err != nil { - return fmt.Errorf("xds: failed to create xds-client: %v", err) - } - s.xdsC = client - s.xdsClientClose = close - return nil -} - // Serve gets the underlying gRPC server to accept incoming connections on the // listener lis, which is expected to be listening on a TCP port. // @@ -208,35 +214,16 @@ func (s *GRPCServer) Serve(lis net.Listener) error { return fmt.Errorf("xds: GRPCServer expects listener to return a net.TCPAddr. Got %T", lis.Addr()) } - // If this is the first time Serve() is being called, we need to initialize - // our xdsClient. If not, we can use the existing one. - if err := s.initXDSClient(); err != nil { - return err - } - cfg := s.xdsC.BootstrapConfig() - if cfg == nil { - return errors.New("bootstrap configuration is empty") - } - - // If xds credentials were specified by the user, but bootstrap configs do - // not contain any certificate provider configuration, it is better to fail - // right now rather than failing when attempting to create certificate - // providers after receiving an LDS response with security configuration. - if s.xdsCredsInUse { - if len(cfg.CertProviderConfigs) == 0 { - return errors.New("xds: certificate_providers config missing in bootstrap file") - } + if s.quit.HasFired() { + return grpc.ErrServerStopped } // The server listener resource name template from the bootstrap // configuration contains a template for the name of the Listener resource // to subscribe to for a gRPC server. If the token `%s` is present in the // string, it will be replaced with the server's listening "IP:port" (e.g., - // "0.0.0.0:8080", "[::]:8080"). The absence of a template will be treated - // as an error since we do not have any default value for this. 
- if cfg.ServerListenerResourceNameTemplate == "" { - return errors.New("missing server_listener_resource_name_template in the bootstrap configuration") - } + // "0.0.0.0:8080", "[::]:8080"). + cfg := s.xdsC.BootstrapConfig() name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate, lis.Addr().String()) modeUpdateCh := buffer.NewUnbounded() @@ -407,7 +394,7 @@ func routeAndProcess(ctx context.Context) error { // xdsUnaryInterceptor is the unary interceptor added to the gRPC server to // perform any xDS specific functionality on unary RPCs. -func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { +func xdsUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { if envconfig.XDSRBAC { if err := routeAndProcess(ctx); err != nil { return nil, err @@ -418,7 +405,7 @@ func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServ // xdsStreamInterceptor is the stream interceptor added to the gRPC server to // perform any xDS specific functionality on streaming RPCs. -func xdsStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func xdsStreamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if envconfig.XDSRBAC { if err := routeAndProcess(ss.Context()); err != nil { return err diff --git a/vendor/modules.txt b/vendor/modules.txt index 3f4f0e17771dc..2689b10acdd00 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go v0.110.4 +# cloud.google.com/go v0.110.7 ## explicit; go 1.19 cloud.google.com/go cloud.google.com/go/internal @@ -13,7 +13,7 @@ cloud.google.com/go/bigtable cloud.google.com/go/bigtable/bttest cloud.google.com/go/bigtable/internal cloud.google.com/go/bigtable/internal/option -# cloud.google.com/go/compute v1.22.0 +# cloud.google.com/go/compute v1.23.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 @@ -28,7 +28,7 @@ cloud.google.com/go/iam/apiv1/iampb cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen cloud.google.com/go/longrunning/autogen/longrunningpb -# cloud.google.com/go/pubsub v1.32.0 +# cloud.google.com/go/pubsub v1.33.0 ## explicit; go 1.19 cloud.google.com/go/pubsub cloud.google.com/go/pubsub/apiv1 @@ -606,6 +606,7 @@ github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3 @@ -1167,6 +1168,9 @@ github.com/oklog/run # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid +# 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0 +## explicit; go 1.20 +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -1495,8 +1499,11 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 -## explicit; go 1.19 +# go.opentelemetry.io/collector/featuregate v1.0.0-rcv0015 +## explicit; go 1.20 +go.opentelemetry.io/collector/featuregate +# go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 +## explicit; go 1.20 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1 @@ -1510,15 +1517,16 @@ go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1 go.opentelemetry.io/collector/pdata/internal/json go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/pcommon +go.opentelemetry.io/collector/pdata/plog +go.opentelemetry.io/collector/pdata/plog/plogotlp go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp # go.opentelemetry.io/collector/semconv v0.81.0 ## explicit; go 1.19 go.opentelemetry.io/collector/semconv/v1.6.1 -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 ## explicit; go 1.19 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil # go.opentelemetry.io/otel v1.18.0 ## explicit; go 1.20 go.opentelemetry.io/otel @@ -1530,7 +1538,9 @@ go.opentelemetry.io/otel/internal/attribute go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/internal/v2 go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.17.0/httpconv # go.opentelemetry.io/otel/metric v1.18.0 ## explicit; go 1.20 go.opentelemetry.io/otel/metric @@ -1717,7 +1727,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 +# google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb ## explicit; go 1.19 google.golang.org/genproto/googleapis/bigtable/admin/v2 google.golang.org/genproto/googleapis/bigtable/v2 @@ -1727,18 +1737,18 @@ google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 +# google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails 
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.56.3
-## explicit; go 1.17
+# google.golang.org/grpc v1.58.2
+## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/authz/audit
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb
google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/leastrequest
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/balancer/weightedroundrobin
google.golang.org/grpc/balancer/weightedroundrobin/internal
@@ -1796,6 +1807,7 @@ google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/hierarchy
+google.golang.org/grpc/internal/idle
google.golang.org/grpc/internal/metadata
google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/proto/grpc_lookup_v1

From 6948c4a179a0b7763aab76d5fd34e67d9fffbf3a Mon Sep 17 00:00:00 2001
From: Karsten Jeschkies
Date: Mon, 23 Oct 2023 13:24:51 +0200
Subject: [PATCH 15/33] Turn frontend Tripperware into a Middleware. (#10688)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**What this PR does / why we need it**:
Currently, a request to Loki's frontend API goes through these conversions:

```
http.Request
  ↓ limitedRoundTripper
queryrangebase.Request
  ↓ queryrangebase.Middleware
  …
  ↓
queryrangebase.Request
  ↓ limitedRoundTripper
http.Request
  ↓ grpcRoundTripperAdapter
httpgrpc
  ↓ grpcRoundTripperAdapter
http.Response
  ↓ limitedRoundTripper
queryrangebase.Response
  ↓ limitedRoundTripper
http.Response
```

Since `httpgrpc` and `queryrangebase.Request` are Protobufs there's no good reason to encode and decode them to HTTP responses/requests. Furthermore, the encoding to HTTP makes it harder for us to encode query plans. Thus the conversions are changed to the following:

```
http.Request
  ↓
queryrangebase.Request
  ↓ queryrangebase.Middleware
  …
  ↓
queryrangebase.Request
  ↓
httpgrpc
  ↓
queryrangebase.Response
  ↓
http.Response
```

In order to achieve this the `http.RoundTripper` is pushed to the outside. Only the serialization layer from `http.Request` to `queryrangebase.Request` and `http.Response` to `queryrangebase.Response` will be an `http.RoundTripper`. Everything else is either a `queryrangebase.Handler` or a `queryrangebase.Middleware`. A minimal sketch of this Handler/Middleware shape follows the checklist below.

**Checklist**
- [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**)
- [ ] Documentation added
- [x] Tests updated
- [ ] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`.
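To make the new shape concrete, here is a minimal, self-contained Go sketch of the Handler/Middleware pattern described above. It is an illustration, not Loki's actual code: only the `Do(ctx, Request)` signature of `queryrangebase.Handler` and the `Wrap` semantics of `queryrangebase.Middleware` come from the diffs in this patch; the `Request`/`Response` stand-ins, the `HandlerFunc`/`MiddlewareFunc` helpers, and the example middleware are assumptions made for the sketch.

```go
package main

import (
	"context"
	"fmt"
)

// Stand-ins for queryrangebase.Request and queryrangebase.Response.
type Request interface{ GetQuery() string }
type Response any

type query string

func (q query) GetQuery() string { return string(q) }

// Handler mirrors queryrangebase.Handler: the protobuf-level analogue of
// http.RoundTripper.
type Handler interface {
	Do(ctx context.Context, req Request) (Response, error)
}

// HandlerFunc lets plain functions satisfy Handler.
type HandlerFunc func(ctx context.Context, req Request) (Response, error)

func (f HandlerFunc) Do(ctx context.Context, req Request) (Response, error) { return f(ctx, req) }

// Middleware mirrors queryrangebase.Middleware: it decorates a Handler and
// returns another Handler, so the whole chain stays at the Request/Response
// level and never re-encodes to HTTP.
type Middleware interface {
	Wrap(next Handler) Handler
}

// MiddlewareFunc lets plain functions satisfy Middleware.
type MiddlewareFunc func(next Handler) Handler

func (f MiddlewareFunc) Wrap(next Handler) Handler { return f(next) }

func main() {
	// Innermost handler, standing in for the frontend that forwards the
	// request to a scheduler.
	var frontend Handler = HandlerFunc(func(_ context.Context, req Request) (Response, error) {
		return fmt.Sprintf("results for %s", req.GetQuery()), nil
	})

	// An illustrative middleware, standing in for splitting, sharding or
	// limits enforcement.
	logging := MiddlewareFunc(func(next Handler) Handler {
		return HandlerFunc(func(ctx context.Context, req Request) (Response, error) {
			fmt.Printf("enqueueing %s\n", req.GetQuery())
			return next.Do(ctx, req)
		})
	})

	resp, err := logging.Wrap(frontend).Do(context.Background(), query(`{foo="bar"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}
```

The design point is visible here: every layer of the chain exchanges protobuf-level requests and responses, and nothing beneath the outermost serialization round tripper (`NewSerializeRoundTripper` in this patch) ever sees an `http.Request`.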
[Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) --- pkg/loki/loki.go | 19 +- pkg/loki/modules.go | 23 +- pkg/lokifrontend/frontend/config.go | 9 +- .../frontend/downstream_roundtripper.go | 36 +- .../frontend/transport/handler.go | 34 + .../frontend/transport/roundtripper.go | 48 +- pkg/lokifrontend/frontend/v1/frontend_test.go | 11 +- pkg/lokifrontend/frontend/v2/frontend.go | 128 +++- pkg/lokifrontend/frontend/v2/frontend_test.go | 3 +- pkg/querier/limits/definitions.go | 22 + pkg/querier/querier.go | 17 +- pkg/querier/querier_test.go | 2 +- pkg/querier/queryrange/codec.go | 73 +-- pkg/querier/queryrange/codec_test.go | 13 + pkg/querier/queryrange/downstreamer.go | 61 -- pkg/querier/queryrange/limits.go | 88 +-- pkg/querier/queryrange/limits/defitions.go | 32 + pkg/querier/queryrange/limits_test.go | 191 +++--- pkg/querier/queryrange/marshal.go | 60 +- .../queryrange/queryrangebase/roundtrip.go | 80 --- pkg/querier/queryrange/querysharding.go | 1 - pkg/querier/queryrange/querysharding_test.go | 2 +- pkg/querier/queryrange/roundtrip.go | 440 ++++++------- pkg/querier/queryrange/roundtrip_test.go | 589 ++++++------------ pkg/querier/queryrange/volume.go | 23 - pkg/util/limiter/combined_limits.go | 8 +- pkg/util/querylimits/propagation.go | 9 +- pkg/util/querylimits/tripperware.go | 51 -- 28 files changed, 844 insertions(+), 1229 deletions(-) create mode 100644 pkg/querier/limits/definitions.go create mode 100644 pkg/querier/queryrange/limits/defitions.go delete mode 100644 pkg/util/querylimits/tripperware.go diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 0b09ee0491849..718b5fd13c195 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -312,7 +312,7 @@ type Loki struct { runtimeConfig *runtimeconfig.Manager MemberlistKV *memberlist.KVInitService compactor *compactor.Compactor - QueryFrontEndTripperware queryrangebase.Tripperware + QueryFrontEndMiddleware queryrangebase.Middleware queryScheduler *scheduler.Scheduler querySchedulerRingManager *lokiring.RingManager usageReport *analytics.Reporter @@ -590,7 +590,7 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(Ingester, t.initIngester) mm.RegisterModule(Querier, t.initQuerier) mm.RegisterModule(IngesterQuerier, t.initIngesterQuerier) - mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendTripperware, modules.UserInvisibleModule) + mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendMiddleware, modules.UserInvisibleModule) mm.RegisterModule(QueryFrontend, t.initQueryFrontend) mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule) mm.RegisterModule(Ruler, t.initRuler) @@ -653,26 +653,17 @@ func (t *Loki) setupModuleManager() error { level.Debug(util_log.Logger).Log("msg", "per-query request limits support enabled") mm.RegisterModule(QueryLimiter, t.initQueryLimiter, modules.UserInvisibleModule) mm.RegisterModule(QueryLimitsInterceptors, t.initQueryLimitsInterceptors, modules.UserInvisibleModule) - mm.RegisterModule(QueryLimitsTripperware, t.initQueryLimitsTripperware, modules.UserInvisibleModule) + + // This module is defunct but the target remains for backwards compatibility. + mm.RegisterModule(QueryLimitsTripperware, func() (services.Service, error) { return nil, nil }, modules.UserInvisibleModule) // Ensure query limiter embeds overrides after they've been // created. 
deps[QueryLimiter] = []string{Overrides} deps[QueryLimitsInterceptors] = []string{} - // Ensure query limits tripperware embeds the query frontend - // tripperware after it's been created. Any additional - // middleware/tripperware you want to add to the querier or - // frontend must happen inject a dependence on the query limits - // tripperware. - deps[QueryLimitsTripperware] = []string{QueryFrontendTripperware} - deps[Querier] = append(deps[Querier], QueryLimiter) - // The frontend receives a tripperware. Make sure it uses the - // wrapped one. - deps[QueryFrontend] = append(deps[QueryFrontend], QueryLimitsTripperware) - // query frontend tripperware uses t.Overrides. Make sure it // uses the one wrapped by query limiter. deps[QueryFrontendTripperware] = append(deps[QueryFrontendTripperware], QueryLimiter) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index f5a46e2164ed4..dd53f3f262195 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -781,10 +781,10 @@ type disabledShuffleShardingLimits struct{} func (disabledShuffleShardingLimits) MaxQueriersPerUser(_ string) int { return 0 } -func (t *Loki) initQueryFrontendTripperware() (_ services.Service, err error) { +func (t *Loki) initQueryFrontendMiddleware() (_ services.Service, err error) { level.Debug(util_log.Logger).Log("msg", "initializing query frontend tripperware") - tripperware, stopper, err := queryrange.NewTripperware( + middleware, stopper, err := queryrange.NewMiddleware( t.Cfg.QueryRange, t.Cfg.Querier.Engine, util_log.Logger, @@ -797,7 +797,7 @@ func (t *Loki) initQueryFrontendTripperware() (_ services.Service, err error) { return } t.stopper = stopper - t.QueryFrontEndTripperware = tripperware + t.QueryFrontEndMiddleware = middleware return services.NewIdleService(nil, nil), nil } @@ -864,13 +864,15 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { FrontendV2: t.Cfg.Frontend.FrontendV2, DownstreamURL: t.Cfg.Frontend.DownstreamURL, } - roundTripper, frontendV1, frontendV2, err := frontend.InitFrontend( + frontendTripper, frontendV1, frontendV2, err := frontend.InitFrontend( combinedCfg, scheduler.SafeReadRing(t.Cfg.QueryScheduler, t.querySchedulerRingManager), disabledShuffleShardingLimits{}, t.Cfg.Server.GRPCListenPort, util_log.Logger, - prometheus.DefaultRegisterer) + prometheus.DefaultRegisterer, + queryrange.DefaultCodec, + ) if err != nil { return nil, err } @@ -887,7 +889,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { level.Debug(util_log.Logger).Log("msg", "no query frontend configured") } - roundTripper = t.QueryFrontEndTripperware(roundTripper) + roundTripper := queryrange.NewSerializeRoundTripper(t.QueryFrontEndMiddleware.Wrap(frontendTripper), queryrange.DefaultCodec) frontendHandler := transport.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util_log.Logger, prometheus.DefaultRegisterer) if t.Cfg.Frontend.CompressResponses { @@ -1477,15 +1479,6 @@ func (t *Loki) initQueryLimitsInterceptors() (services.Service, error) { return nil, nil } -func (t *Loki) initQueryLimitsTripperware() (services.Service, error) { - _ = level.Debug(util_log.Logger).Log("msg", "initializing query limits tripperware") - t.QueryFrontEndTripperware = querylimits.WrapTripperware( - t.QueryFrontEndTripperware, - ) - - return nil, nil -} - func (t *Loki) initAnalytics() (services.Service, error) { if !t.Cfg.Analytics.Enabled { return nil, nil diff --git a/pkg/lokifrontend/frontend/config.go b/pkg/lokifrontend/frontend/config.go index cdbfca34ac6f9..290c097de2669 100644 --- 
a/pkg/lokifrontend/frontend/config.go +++ b/pkg/lokifrontend/frontend/config.go @@ -12,6 +12,7 @@ import ( "github.com/grafana/loki/pkg/lokifrontend/frontend/transport" v1 "github.com/grafana/loki/pkg/lokifrontend/frontend/v1" v2 "github.com/grafana/loki/pkg/lokifrontend/frontend/v2" + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/util" ) @@ -38,7 +39,7 @@ func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { // Returned RoundTripper can be wrapped in more round-tripper middlewares, and then eventually registered // into HTTP server using the Handler from this package. Returned RoundTripper is always non-nil // (if there are no errors), and it uses the returned frontend (if any). -func InitFrontend(cfg CombinedFrontendConfig, ring ring.ReadRing, limits v1.Limits, grpcListenPort int, log log.Logger, reg prometheus.Registerer) (http.RoundTripper, *v1.Frontend, *v2.Frontend, error) { +func InitFrontend(cfg CombinedFrontendConfig, ring ring.ReadRing, limits v1.Limits, grpcListenPort int, log log.Logger, reg prometheus.Registerer, codec transport.Codec) (queryrangebase.Handler, *v1.Frontend, *v2.Frontend, error) { switch { case cfg.DownstreamURL != "": // If the user has specified a downstream Prometheus, then we should use that. @@ -59,8 +60,8 @@ func InitFrontend(cfg CombinedFrontendConfig, ring ring.ReadRing, limits v1.Limi cfg.FrontendV2.Port = grpcListenPort } - fr, err := v2.NewFrontend(cfg.FrontendV2, ring, log, reg) - return transport.AdaptGrpcRoundTripperToHTTPRoundTripper(fr), nil, fr, err + fr, err := v2.NewFrontend(cfg.FrontendV2, ring, log, reg, codec) + return fr, nil, fr, err default: // No scheduler = use original frontend. @@ -68,6 +69,6 @@ func InitFrontend(cfg CombinedFrontendConfig, ring ring.ReadRing, limits v1.Limi if err != nil { return nil, nil, nil, err } - return transport.AdaptGrpcRoundTripperToHTTPRoundTripper(fr), fr, nil, nil + return transport.AdaptGrpcRoundTripperToHandler(fr, codec), fr, nil, nil } } diff --git a/pkg/lokifrontend/frontend/downstream_roundtripper.go b/pkg/lokifrontend/frontend/downstream_roundtripper.go index d52ced81938ab..90f330900c32b 100644 --- a/pkg/lokifrontend/frontend/downstream_roundtripper.go +++ b/pkg/lokifrontend/frontend/downstream_roundtripper.go @@ -1,20 +1,26 @@ package frontend import ( + "context" + "fmt" "net/http" "net/url" "path" + "github.com/grafana/dskit/user" "github.com/opentracing/opentracing-go" + + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" ) // RoundTripper that forwards requests to downstream URL. 
type downstreamRoundTripper struct { downstreamURL *url.URL transport http.RoundTripper + codec queryrangebase.Codec } -func NewDownstreamRoundTripper(downstreamURL string, transport http.RoundTripper) (http.RoundTripper, error) { +func NewDownstreamRoundTripper(downstreamURL string, transport http.RoundTripper) (queryrangebase.Handler, error) { u, err := url.Parse(downstreamURL) if err != nil { return nil, err @@ -23,8 +29,19 @@ func NewDownstreamRoundTripper(downstreamURL string, transport http.RoundTripper return &downstreamRoundTripper{downstreamURL: u, transport: transport}, nil } -func (d downstreamRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(r.Context()) +func (d downstreamRoundTripper) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) + + var r *http.Request + + r, err := d.codec.EncodeRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("cannot convert request to HTTP request: %w", err) + } + if err := user.InjectOrgIDIntoHTTPRequest(ctx, r); err != nil { + return nil, err + } + if tracer != nil && span != nil { carrier := opentracing.HTTPHeadersCarrier(r.Header) err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) @@ -37,5 +54,16 @@ func (d downstreamRoundTripper) RoundTrip(r *http.Request) (*http.Response, erro r.URL.Host = d.downstreamURL.Host r.URL.Path = path.Join(d.downstreamURL.Path, r.URL.Path) r.Host = "" - return d.transport.RoundTrip(r) + + httpResp, err := d.transport.RoundTrip(r) + if err != nil { + return nil, err + } + + resp, err := d.codec.DecodeResponse(ctx, httpResp, req) + if err != nil { + return nil, fmt.Errorf("cannot convert HTTP response to response: %w", err) + } + + return resp, nil } diff --git a/pkg/lokifrontend/frontend/transport/handler.go b/pkg/lokifrontend/frontend/transport/handler.go index 8cd8ca0bbd8e0..92c29cc896443 100644 --- a/pkg/lokifrontend/frontend/transport/handler.go +++ b/pkg/lokifrontend/frontend/transport/handler.go @@ -16,11 +16,13 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/httpgrpc/server" + "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/grafana/dskit/tenant" + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" querier_stats "github.com/grafana/loki/pkg/querier/stats" "github.com/grafana/loki/pkg/util" util_log "github.com/grafana/loki/pkg/util/log" @@ -252,3 +254,35 @@ func statsValue(name string, d time.Duration) string { durationInMs := strconv.FormatFloat(float64(d)/float64(time.Millisecond), 'f', -1, 64) return name + ";dur=" + durationInMs } + +func AdaptGrpcRoundTripperToHandler(r GrpcRoundTripper, codec Codec) queryrangebase.Handler { + return &grpcRoundTripperToHandlerAdapter{roundTripper: r, codec: codec} +} + +// This adapter wraps GrpcRoundTripper and converts it into a queryrangebase.Handler +type grpcRoundTripperToHandlerAdapter struct { + roundTripper GrpcRoundTripper + codec Codec +} + +func (a *grpcRoundTripperToHandlerAdapter) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + httpReq, err := a.codec.EncodeRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("cannot convert
request to HTTP request: %w", err) + } + if err := user.InjectOrgIDIntoHTTPRequest(ctx, httpReq); err != nil { + return nil, err + } + + grpcReq, err := server.HTTPRequest(httpReq) + if err != nil { + return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err) + } + + grpcResp, err := a.roundTripper.RoundTripGRPC(ctx, grpcReq) + if err != nil { + return nil, err + } + + return a.codec.DecodeHTTPGrpcResponse(grpcResp, req) +} diff --git a/pkg/lokifrontend/frontend/transport/roundtripper.go b/pkg/lokifrontend/frontend/transport/roundtripper.go index 8d8993649555c..58f9d13aa98fe 100644 --- a/pkg/lokifrontend/frontend/transport/roundtripper.go +++ b/pkg/lokifrontend/frontend/transport/roundtripper.go @@ -1,13 +1,11 @@ package transport import ( - "bytes" "context" - "io" - "net/http" "github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/httpgrpc/server" + + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" ) // GrpcRoundTripper is similar to http.RoundTripper, but works with HTTP requests converted to protobuf messages. @@ -15,43 +13,7 @@ type GrpcRoundTripper interface { RoundTripGRPC(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) } -func AdaptGrpcRoundTripperToHTTPRoundTripper(r GrpcRoundTripper) http.RoundTripper { - return &grpcRoundTripperAdapter{roundTripper: r} -} - -// This adapter wraps GrpcRoundTripper and converted it into http.RoundTripper -type grpcRoundTripperAdapter struct { - roundTripper GrpcRoundTripper -} - -type buffer struct { - buff []byte - io.ReadCloser -} - -func (b *buffer) Bytes() []byte { - return b.buff -} - -func (a *grpcRoundTripperAdapter) RoundTrip(r *http.Request) (*http.Response, error) { - req, err := server.HTTPRequest(r) - if err != nil { - return nil, err - } - - resp, err := a.roundTripper.RoundTripGRPC(r.Context(), req) - if err != nil { - return nil, err - } - - httpResp := &http.Response{ - StatusCode: int(resp.Code), - Body: &buffer{buff: resp.Body, ReadCloser: io.NopCloser(bytes.NewReader(resp.Body))}, - Header: http.Header{}, - ContentLength: int64(len(resp.Body)), - } - for _, h := range resp.Headers { - httpResp.Header[h.Key] = h.Values - } - return httpResp, nil +type Codec interface { + queryrangebase.Codec + DecodeHTTPGrpcResponse(r *httpgrpc.HTTPResponse, req queryrangebase.Request) (queryrangebase.Response, error) } diff --git a/pkg/lokifrontend/frontend/v1/frontend_test.go b/pkg/lokifrontend/frontend/v1/frontend_test.go index cbe34776e6ecf..bd417f4885985 100644 --- a/pkg/lokifrontend/frontend/v1/frontend_test.go +++ b/pkg/lokifrontend/frontend/v1/frontend_test.go @@ -28,6 +28,7 @@ import ( "go.uber.org/atomic" "google.golang.org/grpc" + "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/lokifrontend/frontend/transport" "github.com/grafana/loki/pkg/lokifrontend/frontend/v1/frontendv1pb" "github.com/grafana/loki/pkg/querier/queryrange" @@ -44,7 +45,7 @@ const ( func TestFrontend(t *testing.T) { handler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { - return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}}, nil + return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}, Version: uint32(loghttp.VersionV1)}, nil }) test := func(addr string, _ *Frontend) { req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/%s", addr, labelQuery), nil) @@ -81,7 +82,7 @@ func TestFrontendPropagateTrace(t 
*testing.T) { traceID := fmt.Sprintf("%v", sp.Context().(jaeger.SpanContext).TraceID()) observedTraceID <- traceID - return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}}, nil + return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}, Version: uint32(loghttp.VersionV1)}, nil }) test := func(addr string, _ *Frontend) { @@ -186,7 +187,7 @@ func TestFrontendCancel(t *testing.T) { func TestFrontendMetricsCleanup(t *testing.T) { handler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { - return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}}, nil + return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}, Version: uint32(loghttp.VersionV1)}, nil }) for _, matchMaxConcurrency := range []bool{false, true} { @@ -260,12 +261,12 @@ func testFrontend(t *testing.T, config Config, handler queryrangebase.Handler, t handlerCfg := transport.HandlerConfig{} flagext.DefaultValues(&handlerCfg) - rt := transport.AdaptGrpcRoundTripperToHTTPRoundTripper(v1) + rt := queryrange.NewSerializeHTTPHandler(transport.AdaptGrpcRoundTripperToHandler(v1, queryrange.DefaultCodec), queryrange.DefaultCodec) r := mux.NewRouter() r.PathPrefix("/").Handler(middleware.Merge( middleware.AuthenticateUser, middleware.Tracer{}, - ).Wrap(transport.NewHandler(handlerCfg, rt, logger, nil))) + ).Wrap(rt)) httpServer := http.Server{ Handler: r, diff --git a/pkg/lokifrontend/frontend/v2/frontend.go b/pkg/lokifrontend/frontend/v2/frontend.go index c085cb86c43d1..0e36d3765ed43 100644 --- a/pkg/lokifrontend/frontend/v2/frontend.go +++ b/pkg/lokifrontend/frontend/v2/frontend.go @@ -14,9 +14,11 @@ import ( "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/grpcclient" "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/httpgrpc/server" "github.com/grafana/dskit/netutil" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" + "github.com/grafana/dskit/user" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -25,7 +27,9 @@ import ( "github.com/grafana/dskit/tenant" + "github.com/grafana/loki/pkg/lokifrontend/frontend/transport" "github.com/grafana/loki/pkg/lokifrontend/frontend/v2/frontendv2pb" + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/querier/stats" lokigrpc "github.com/grafana/loki/pkg/util/httpgrpc" "github.com/grafana/loki/pkg/util/httpreq" @@ -77,8 +81,13 @@ type Frontend struct { schedulerWorkers *frontendSchedulerWorkers requests *requestsInProgress + + codec transport.Codec } +var _ queryrangebase.Handler = &Frontend{} +var _ transport.GrpcRoundTripper = &Frontend{} + type frontendRequest struct { queryID uint64 request *httpgrpc.HTTPRequest @@ -109,7 +118,7 @@ type enqueueResult struct { } // NewFrontend creates a new frontend. 
-func NewFrontend(cfg Config, ring ring.ReadRing, log log.Logger, reg prometheus.Registerer) (*Frontend, error) { +func NewFrontend(cfg Config, ring ring.ReadRing, log log.Logger, reg prometheus.Registerer, codec transport.Codec) (*Frontend, error) { requestsCh := make(chan *frontendRequest) schedulerWorkers, err := newFrontendSchedulerWorkers(cfg, fmt.Sprintf("%s:%d", cfg.Addr, cfg.Port), ring, requestsCh, log) @@ -123,6 +132,7 @@ func NewFrontend(cfg Config, ring ring.ReadRing, log log.Logger, reg prometheus. requestsCh: requestsCh, schedulerWorkers: schedulerWorkers, requests: newRequestsInProgress(), + codec: codec, } // Randomize to avoid getting responses from queries sent before restart, which could lead to mixing results // between different queries. Note that frontend verifies the user, so it cannot leak results between tenants. @@ -219,32 +229,79 @@ func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) response: make(chan *frontendv2pb.QueryResultRequest, 1), } - f.requests.put(freq) + cancelCh, err := f.enqueue(ctx, freq) defer f.requests.delete(freq.queryID) + if err != nil { + return nil, err + } - retries := f.cfg.WorkerConcurrency + 1 // To make sure we hit at least two different schedulers. - -enqueueAgain: - var cancelCh chan<- uint64 select { case <-ctx.Done(): + if cancelCh != nil { + select { + case cancelCh <- freq.queryID: + // cancellation sent. + default: + // failed to cancel, ignore. + level.Warn(f.log).Log("msg", "failed to send cancellation request to scheduler, queue full") + } + } return nil, ctx.Err() - case f.requestsCh <- freq: - // Enqueued, let's wait for response. - enqRes := <-freq.enqueue - - if enqRes.status == waitForResponse { - cancelCh = enqRes.cancelCh - break // go wait for response. - } else if enqRes.status == failed { - retries-- - if retries > 0 { - goto enqueueAgain - } + case resp := <-freq.response: + if stats.ShouldTrackHTTPGRPCResponse(resp.HttpResponse) { + stats := stats.FromContext(ctx) + stats.Merge(resp.Stats) // Safe if stats is nil. } - return nil, httpgrpc.Errorf(http.StatusInternalServerError, "failed to enqueue request") + return resp.HttpResponse, nil + } +} + +// Do implements queryrangebase.Handler analogous to RoundTripGRPC. +func (f *Frontend) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, err + } + tenantID := tenant.JoinTenantIDs(tenantIDs) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // For backwards compatibility we are sending both encodings + httpReq, err := f.codec.EncodeRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("cannot convert request to HTTP request: %w", err) + } + + if err := user.InjectOrgIDIntoHTTPRequest(ctx, httpReq); err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + httpgrpcReq, err := server.HTTPRequest(httpReq) + if err != nil { + return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err) + } + + freq := &frontendRequest{ + queryID: f.lastQueryID.Inc(), + request: httpgrpcReq, + tenantID: tenantID, + actor: httpreq.ExtractActorPath(ctx), + statsEnabled: stats.IsEnabled(ctx), + + cancel: cancel, + + // Buffer of 1 to ensure response or error can be written to the channel + // even if this goroutine goes away due to client context cancellation.
+ enqueue: make(chan enqueueResult, 1), + response: make(chan *frontendv2pb.QueryResultRequest, 1), + } + + cancelCh, err := f.enqueue(ctx, freq) + defer f.requests.delete(freq.queryID) + if err != nil { + return nil, err + } select { @@ -266,8 +323,39 @@ enqueueAgain: stats.Merge(resp.Stats) // Safe if stats is nil. } - return resp.HttpResponse, nil + return f.codec.DecodeHTTPGrpcResponse(resp.HttpResponse, req) + } +} + +func (f *Frontend) enqueue(ctx context.Context, freq *frontendRequest) (chan<- uint64, error) { + f.requests.put(freq) + + retries := f.cfg.WorkerConcurrency + 1 // To make sure we hit at least two different schedulers. + +enqueueAgain: + var cancelCh chan<- uint64 + select { + case <-ctx.Done(): + return cancelCh, ctx.Err() + + case f.requestsCh <- freq: + // Enqueued, let's wait for response. + enqRes := <-freq.enqueue + + if enqRes.status == waitForResponse { + cancelCh = enqRes.cancelCh + break // go wait for response. + } else if enqRes.status == failed { + retries-- + if retries > 0 { + goto enqueueAgain + } + } + + return cancelCh, httpgrpc.Errorf(http.StatusInternalServerError, "failed to enqueue request") } + + return cancelCh, nil } func (f *Frontend) QueryResult(ctx context.Context, qrReq *frontendv2pb.QueryResultRequest) (*frontendv2pb.QueryResultResponse, error) { diff --git a/pkg/lokifrontend/frontend/v2/frontend_test.go b/pkg/lokifrontend/frontend/v2/frontend_test.go index 0c4223747eb09..d70a51852672f 100644 --- a/pkg/lokifrontend/frontend/v2/frontend_test.go +++ b/pkg/lokifrontend/frontend/v2/frontend_test.go @@ -19,6 +19,7 @@ import ( "google.golang.org/grpc" "github.com/grafana/loki/pkg/lokifrontend/frontend/v2/frontendv2pb" + "github.com/grafana/loki/pkg/querier/queryrange" "github.com/grafana/loki/pkg/querier/stats" "github.com/grafana/loki/pkg/scheduler/schedulerpb" "github.com/grafana/loki/pkg/util/test" @@ -46,7 +47,7 @@ func setupFrontend(t *testing.T, schedulerReplyFunc func(f *Frontend, msg *sched cfg.Port = grpcPort logger := log.NewNopLogger() - f, err := NewFrontend(cfg, nil, logger, nil) + f, err := NewFrontend(cfg, nil, logger, nil, queryrange.DefaultCodec) require.NoError(t, err) frontendv2pb.RegisterFrontendForQuerierServer(server, f) diff --git a/pkg/querier/limits/definitions.go b/pkg/querier/limits/definitions.go new file mode 100644 index 0000000000000..cda30b116976d --- /dev/null +++ b/pkg/querier/limits/definitions.go @@ -0,0 +1,22 @@ +package limits + +import ( + "context" + "time" + + "github.com/grafana/loki/pkg/logql" +) + +type TimeRangeLimits interface { + MaxQueryLookback(context.Context, string) time.Duration + MaxQueryLength(context.Context, string) time.Duration +} + +type Limits interface { + logql.Limits + TimeRangeLimits + QueryTimeout(context.Context, string) time.Duration + MaxStreamsMatchersPerQuery(context.Context, string) int + MaxConcurrentTailRequests(context.Context, string) int + MaxEntriesLimitPerQuery(context.Context, string) int +} diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index f2333415e6e42..8295f02c644c0 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -27,6 +27,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" + querier_limits "github.com/grafana/loki/pkg/querier/limits" "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/stores/index/stats" listutil
"github.com/grafana/loki/pkg/util" @@ -92,14 +93,7 @@ type Querier interface { Volume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) } -type Limits interface { - logql.Limits - timeRangeLimits - QueryTimeout(context.Context, string) time.Duration - MaxStreamsMatchersPerQuery(context.Context, string) int - MaxConcurrentTailRequests(context.Context, string) int - MaxEntriesLimitPerQuery(context.Context, string) int -} +type Limits querier_limits.Limits // Store is the store interface we need on the querier. type Store interface { @@ -667,12 +661,9 @@ func (q *SingleTenantQuerier) validateQueryRequest(ctx context.Context, req logq return validateQueryTimeRangeLimits(ctx, userID, q.limits, req.GetStart(), req.GetEnd()) } -type timeRangeLimits interface { - MaxQueryLookback(context.Context, string) time.Duration - MaxQueryLength(context.Context, string) time.Duration -} +type TimeRangeLimits querier_limits.TimeRangeLimits -func validateQueryTimeRangeLimits(ctx context.Context, userID string, limits timeRangeLimits, from, through time.Time) (time.Time, time.Time, error) { +func validateQueryTimeRangeLimits(ctx context.Context, userID string, limits TimeRangeLimits, from, through time.Time) (time.Time, time.Time, error) { now := nowFunc() // Clamp the time range based on the max query lookback. maxQueryLookback := limits.MaxQueryLookback(ctx, userID) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 356ff7fd65469..d89d24a1751b0 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1136,7 +1136,7 @@ func Test_validateQueryTimeRangeLimits(t *testing.T) { nowFunc = func() time.Time { return now } tests := []struct { name string - limits timeRangeLimits + limits TimeRangeLimits from time.Time through time.Time wantFrom time.Time diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index fdca5a6a5c9b6..5b9611a4f38c5 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -35,6 +35,7 @@ import ( "github.com/grafana/loki/pkg/util/httpreq" "github.com/grafana/loki/pkg/util/marshal" marshal_legacy "github.com/grafana/loki/pkg/util/marshal/legacy" + "github.com/grafana/loki/pkg/util/querylimits" ) var DefaultCodec = &Codec{} @@ -299,6 +300,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } + return &LabelRequest{ LabelRequest: *req, path: r.URL.Path, @@ -352,7 +354,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer // labelNamesRoutes is used to extract the name for querying label values. var labelNamesRoutes = regexp.MustCompile(`/loki/api/v1/label/(?P[^/]+)/values`) -// DecodeHTTPGrpcRequest decodes an httpgrp.HTTPrequest to queryrangebase.Request. +// DecodeHTTPGrpcRequest decodes an httpgrp.HTTPRequest to queryrangebase.Request. func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) (queryrangebase.Request, context.Context, error) { httpReq, err := http.NewRequest(r.Method, r.Url, io.NopCloser(bytes.NewBuffer(r.Body))) if err != nil { @@ -485,6 +487,15 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) } } +// DecodeHTTPGrpcResponse decodes an httpgrp.HTTPResponse to queryrangebase.Response. 
+func (Codec) DecodeHTTPGrpcResponse(r *httpgrpc.HTTPResponse, req queryrangebase.Request) (queryrangebase.Response, error) { + headers := make(http.Header) + for _, header := range r.Headers { + headers[header.Key] = header.Values + } + return decodeResponseJSONFrom(r.Body, req, headers) +} + func (Codec) EncodeHTTPGrpcResponse(ctx context.Context, req *httpgrpc.HTTPRequest, res queryrangebase.Response) (*httpgrpc.HTTPResponse, error) { version := loghttp.GetVersion(req.Url) var buf bytes.Buffer @@ -515,6 +526,14 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht header.Set(httpreq.LokiActorPathHeader, actor) } + limits := querylimits.ExtractQueryLimitsContext(ctx) + if limits != nil { + err := querylimits.InjectQueryLimitsHeader(&header, limits) + if err != nil { + return nil, err + } + } + switch request := r.(type) { case *LokiRequest: params := url.Values{ @@ -697,7 +716,6 @@ func (Codec) DecodeResponse(_ context.Context, r *http.Response, req queryrangeb } func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrangebase.Response, error) { - var buf []byte var err error if buffer, ok := r.Body.(Buffer); ok { @@ -709,6 +727,11 @@ func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrang } } + return decodeResponseJSONFrom(buf, req, r.Header) +} + +func decodeResponseJSONFrom(buf []byte, req queryrangebase.Request, headers http.Header) (queryrangebase.Response, error) { + switch req := req.(type) { case *LokiSeriesRequest: var resp loghttp.SeriesResponse @@ -728,7 +751,7 @@ func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrang Status: resp.Status, Version: uint32(loghttp.GetVersion(req.Path)), Data: data, - Headers: httpResponseHeadersToPromResponseHeaders(r.Header), + Headers: httpResponseHeadersToPromResponseHeaders(headers), }, nil case *LabelRequest: var resp loghttp.LabelResponse @@ -739,7 +762,7 @@ func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrang Status: resp.Status, Version: uint32(loghttp.GetVersion(req.Path())), Data: resp.Data, - Headers: httpResponseHeadersToPromResponseHeaders(r.Header), + Headers: httpResponseHeadersToPromResponseHeaders(headers), }, nil case *logproto.IndexStatsRequest: var resp logproto.IndexStatsResponse @@ -748,7 +771,7 @@ func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrang } return &IndexStatsResponse{ Response: &resp, - Headers: httpResponseHeadersToPromResponseHeaders(r.Header), + Headers: httpResponseHeadersToPromResponseHeaders(headers), }, nil case *logproto.VolumeRequest: var resp logproto.VolumeResponse @@ -757,7 +780,7 @@ func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrang } return &VolumeResponse{ Response: &resp, - Headers: httpResponseHeadersToPromResponseHeaders(r.Header), + Headers: httpResponseHeadersToPromResponseHeaders(headers), }, nil default: var resp loghttp.QueryResponse @@ -773,7 +796,7 @@ func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrang ResultType: loghttp.ResultTypeMatrix, Result: toProtoMatrix(resp.Data.Result.(loghttp.Matrix)), }, - Headers: convertPrometheusResponseHeadersToPointers(httpResponseHeadersToPromResponseHeaders(r.Header)), + Headers: convertPrometheusResponseHeadersToPointers(httpResponseHeadersToPromResponseHeaders(headers)), }, Statistics: resp.Data.Statistics, }, nil @@ -803,7 +826,7 @@ func decodeResponseJSON(r *http.Response, req queryrangebase.Request) (queryrang ResultType: 
loghttp.ResultTypeStream, Result: resp.Data.Result.(loghttp.Streams).ToProto(), }, - Headers: httpResponseHeadersToPromResponseHeaders(r.Header), + Headers: httpResponseHeadersToPromResponseHeaders(headers), }, nil case loghttp.ResultTypeVector: return &LokiPromResponse{ @@ -813,7 +836,7 @@ func decodeResponseJSON(r *http.Response, req queryrang ResultType: loghttp.ResultTypeVector, Result: toProtoVector(resp.Data.Result.(loghttp.Vector)), }, - Headers: convertPrometheusResponseHeadersToPointers(httpResponseHeadersToPromResponseHeaders(r.Header)), + Headers: convertPrometheusResponseHeadersToPointers(httpResponseHeadersToPromResponseHeaders(headers)), }, Statistics: resp.Data.Statistics, }, nil @@ -825,7 +848,7 @@ func decodeResponseJSON(r *http.Response, req queryrang ResultType: loghttp.ResultTypeScalar, Result: toProtoScalar(resp.Data.Result.(loghttp.Scalar)), }, - Headers: convertPrometheusResponseHeadersToPointers(httpResponseHeadersToPromResponseHeaders(r.Header)), + Headers: convertPrometheusResponseHeadersToPointers(httpResponseHeadersToPromResponseHeaders(headers)), }, Statistics: resp.Data.Statistics, }, nil @@ -877,7 +900,7 @@ func decodeResponseProtobuf(r *http.Response, req queryrangebase.Request) (query case *QueryResponse_QuantileSketches: return concrete.QuantileSketches.WithHeaders(headers), nil default: - return nil, httpgrpc.Errorf(http.StatusInternalServerError, "unsupported response type, got (%t)", resp.Response) + return nil, httpgrpc.Errorf(http.StatusInternalServerError, "unsupported response type, got (%T)", resp.Response) } } } @@ -977,31 +1000,9 @@ func encodeResponseProtobuf(ctx context.Context, res queryrangebase.Response) (* sp, _ := opentracing.StartSpanFromContext(ctx, "codec.EncodeResponse") defer sp.Finish() - p := QueryResponse{} - - switch response := res.(type) { - case *LokiPromResponse: - p.Response = &QueryResponse_Prom{response} - case *LokiResponse: - p.Response = &QueryResponse_Streams{response} - case *LokiSeriesResponse: - p.Response = &QueryResponse_Series{response} - case *MergedSeriesResponseView: - mat, err := response.Materialize() - if err != nil { - return nil, err - } - p.Response = &QueryResponse_Series{mat} - case *LokiLabelNamesResponse: - p.Response = &QueryResponse_Labels{response} - case *IndexStatsResponse: - p.Response = &QueryResponse_Stats{response} - case *TopKSketchesResponse: - p.Response = &QueryResponse_TopkSketches{response} - case *QuantileSketchResponse: - p.Response = &QueryResponse_QuantileSketches{response} - default: - return nil, httpgrpc.Errorf(http.StatusInternalServerError, fmt.Sprintf("invalid response format, got (%T)", res)) + p, err := QueryResponseWrap(res) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusInternalServerError, err.Error()) } buf, err := p.Marshal() diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index 9be6a1c856af2..2131d34eab67a 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -72,6 +72,19 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { StartTs: start, EndTs: end, }, false}, + {"legacy query_range with regexp", func() (*http.Request, error) { + return http.NewRequest(http.MethodGet, + fmt.Sprintf(`/api/prom/query?start=%d&end=%d&query={foo="bar"}&interval=10&limit=200&direction=BACKWARD&regexp=foo`, start.UnixNano(), end.UnixNano()), nil) + }, &LokiRequest{ + Query: `{foo="bar"} |~ "foo"`, + Limit: 200, + Step: 14000, // step is expected in
ms; calculated default if request param not present + Interval: 10000, // interval is expected in ms + Direction: logproto.BACKWARD, + Path: "/api/prom/query", + StartTs: start, + EndTs: end, + }, false}, {"series", func() (*http.Request, error) { return http.NewRequest(http.MethodGet, fmt.Sprintf(`/series?start=%d&end=%d&match={foo="bar"}`, start.UnixNano(), end.UnixNano()), nil) diff --git a/pkg/querier/queryrange/downstreamer.go b/pkg/querier/queryrange/downstreamer.go index a98998f4ee793..b7a3d2f57a3ff 100644 --- a/pkg/querier/queryrange/downstreamer.go +++ b/pkg/querier/queryrange/downstreamer.go @@ -15,10 +15,8 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" - "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" - "github.com/grafana/loki/pkg/logql/sketch" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/metadata" "github.com/grafana/loki/pkg/logqlmodel/stats" @@ -217,65 +215,6 @@ func sampleStreamToVector(streams []queryrangebase.SampleStream) parser.Value { return xs } -func ResponseToResult(resp queryrangebase.Response) (logqlmodel.Result, error) { - switch r := resp.(type) { - case *LokiResponse: - if r.Error != "" { - return logqlmodel.Result{}, fmt.Errorf("%s: %s", r.ErrorType, r.Error) - } - - streams := make(logqlmodel.Streams, 0, len(r.Data.Result)) - - for _, stream := range r.Data.Result { - streams = append(streams, stream) - } - - return logqlmodel.Result{ - Statistics: r.Statistics, - Data: streams, - Headers: resp.GetHeaders(), - }, nil - - case *LokiPromResponse: - if r.Response.Error != "" { - return logqlmodel.Result{}, fmt.Errorf("%s: %s", r.Response.ErrorType, r.Response.Error) - } - if r.Response.Data.ResultType == loghttp.ResultTypeVector { - return logqlmodel.Result{ - Statistics: r.Statistics, - Data: sampleStreamToVector(r.Response.Data.Result), - Headers: resp.GetHeaders(), - }, nil - } - return logqlmodel.Result{ - Statistics: r.Statistics, - Data: sampleStreamToMatrix(r.Response.Data.Result), - Headers: resp.GetHeaders(), - }, nil - case *TopKSketchesResponse: - matrix, err := sketch.TopKMatrixFromProto(r.Response) - if err != nil { - return logqlmodel.Result{}, fmt.Errorf("cannot decode topk sketch: %w", err) - } - - return logqlmodel.Result{ - Data: matrix, - Headers: resp.GetHeaders(), - }, nil - case *QuantileSketchResponse: - matrix, err := sketch.QuantileSketchMatrixFromProto(r.Response) - if err != nil { - return logqlmodel.Result{}, fmt.Errorf("cannot decode quantile sketch: %w", err) - } - return logqlmodel.Result{ - Data: matrix, - Headers: resp.GetHeaders(), - }, nil - default: - return logqlmodel.Result{}, fmt.Errorf("cannot decode (%T)", resp) - } -} - // downstreamAccumulator is one of two variants: // a logsAccumulator or a bufferedAccumulator. // Which variant is detected on the first call to Accumulate. 
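Aside: the downstreamAccumulator comment above describes a pick-on-first-use pattern, where the concrete accumulator is only chosen once the first result reveals what is being accumulated. A minimal, self-contained sketch of that pattern follows; the names are illustrative stand-ins, not the actual Loki types:

package main

import "fmt"

type result struct {
	kind    string
	payload string
}

type accumulator interface {
	accumulate(r result)
}

type logsAccumulator struct{ lines []string }

func (a *logsAccumulator) accumulate(r result) { a.lines = append(a.lines, r.payload) }

type bufferedAccumulator struct{ buf []result }

func (a *bufferedAccumulator) accumulate(r result) { a.buf = append(a.buf, r) }

// lazyAccumulator defers choosing a concrete variant until the first
// result arrives, because only then is the result type known.
type lazyAccumulator struct{ inner accumulator }

func (l *lazyAccumulator) accumulate(r result) {
	if l.inner == nil {
		// pick the concrete variant on the first call
		if r.kind == "logs" {
			l.inner = &logsAccumulator{}
		} else {
			l.inner = &bufferedAccumulator{}
		}
	}
	l.inner.accumulate(r)
}

func main() {
	var acc lazyAccumulator
	acc.accumulate(result{kind: "logs", payload: `{app="foo"} some line`})
	fmt.Printf("%T\n", acc.inner) // *main.logsAccumulator
}

Deferring the choice this way keeps the caller free of per-result-type branching after the first call.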
diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go index 99967b3e24513..ddf38d30cd004 100644 --- a/pkg/querier/queryrange/limits.go +++ b/pkg/querier/queryrange/limits.go @@ -14,7 +14,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/tenant" - "github.com/grafana/dskit/user" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/common/model" @@ -25,6 +25,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" + queryrange_limits "github.com/grafana/loki/pkg/querier/queryrange/limits" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/index/stats" @@ -49,27 +50,7 @@ var ( ErrMaxQueryParalellism = fmt.Errorf("querying is disabled, please contact your Loki operator") ) -// Limits extends the cortex limits interface with support for per tenant splitby parameters -type Limits interface { - queryrangebase.Limits - logql.Limits - QuerySplitDuration(string) time.Duration - MaxQuerySeries(context.Context, string) int - MaxEntriesLimitPerQuery(context.Context, string) int - MinShardingLookback(string) time.Duration - // TSDBMaxQueryParallelism returns the limit to the number of split queries the - // frontend will process in parallel for TSDB queries. - TSDBMaxQueryParallelism(context.Context, string) int - // TSDBMaxBytesPerShard returns the limit to the number of bytes a single shard - TSDBMaxBytesPerShard(string) int - - RequiredLabels(context.Context, string) []string - RequiredNumberLabels(context.Context, string) int - MaxQueryBytesRead(context.Context, string) int - MaxQuerierBytesRead(context.Context, string) int - MaxStatsCacheFreshness(context.Context, string) time.Duration - VolumeEnabled(string) bool -} +type Limits queryrange_limits.Limits type limits struct { Limits @@ -453,39 +434,33 @@ func (sl *seriesLimiter) isLimitReached() bool { type limitedRoundTripper struct { configs []config.PeriodConfig - next http.RoundTripper + next queryrangebase.Handler limits Limits - codec queryrangebase.Codec middleware queryrangebase.Middleware } +var _ queryrangebase.Handler = limitedRoundTripper{} + // NewLimitedRoundTripper creates a new roundtripper that enforces MaxQueryParallelism to the `next` roundtripper across `middlewares`. -func NewLimitedRoundTripper(next http.RoundTripper, codec queryrangebase.Codec, limits Limits, configs []config.PeriodConfig, middlewares ...queryrangebase.Middleware) http.RoundTripper { +func NewLimitedRoundTripper(next queryrangebase.Handler, limits Limits, configs []config.PeriodConfig, middlewares ...queryrangebase.Middleware) queryrangebase.Handler { transport := limitedRoundTripper{ configs: configs, next: next, - codec: codec, limits: limits, middleware: queryrangebase.MergeMiddlewares(middlewares...), } return transport } -func (rt limitedRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { +func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Request) (queryrangebase.Response, error) { var ( - ctx, cancel = context.WithCancel(r.Context()) + ctx, cancel = context.WithCancel(c) ) defer func() { cancel() }() - // Do not forward any request header. 
- request, err := rt.codec.DecodeRequest(ctx, r, nil) - if err != nil { - return nil, err - } - if span := opentracing.SpanFromContext(ctx); span != nil { request.LogToSpan(span) } @@ -509,7 +484,7 @@ func (rt limitedRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) sem := semaphore.NewWeighted(int64(parallelism)) - response, err := rt.middleware.Wrap( + return rt.middleware.Wrap( queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { // This inner handler is called multiple times by // sharding outer middlewares such as the downstreamer. @@ -523,35 +498,8 @@ func (rt limitedRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) } defer sem.Release(int64(1)) - return rt.do(ctx, r) + return rt.next.Do(ctx, r) })).Do(ctx, request) - if err != nil { - return nil, err - } - - return rt.codec.EncodeResponse(ctx, r, response) -} - -func (rt limitedRoundTripper) do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { - sp, ctx := opentracing.StartSpanFromContext(ctx, "limitedRoundTripper.do") - defer sp.Finish() - - request, err := rt.codec.EncodeRequest(ctx, r) - if err != nil { - return nil, err - } - - if err := user.InjectOrgIDIntoHTTPRequest(ctx, request); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - response, err := rt.next.RoundTrip(request) - if err != nil { - return nil, err - } - defer func() { _ = response.Body.Close() }() - - return rt.codec.DecodeResponse(ctx, response, r) } // WeightedParallelism will calculate the request parallelism to use @@ -688,13 +636,13 @@ func MinWeightedParallelism(ctx context.Context, tenantIDs []string, configs []c } // validates log entries limits -func validateMaxEntriesLimits(req *http.Request, reqLimit uint32, limits Limits) error { - tenantIDs, err := tenant.TenantIDs(req.Context()) +func validateMaxEntriesLimits(ctx context.Context, reqLimit uint32, limits Limits) error { + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - maxEntriesCapture := func(id string) int { return limits.MaxEntriesLimitPerQuery(req.Context(), id) } + maxEntriesCapture := func(id string) int { return limits.MaxEntriesLimitPerQuery(ctx, id) } maxEntriesLimit := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, maxEntriesCapture) if int(reqLimit) > maxEntriesLimit && maxEntriesLimit != 0 { @@ -703,8 +651,8 @@ func validateMaxEntriesLimits(req *http.Request, reqLimit uint32, limits Limits) return nil } -func validateMatchers(req *http.Request, limits Limits, matchers []*labels.Matcher) error { - tenants, err := tenant.TenantIDs(req.Context()) +func validateMatchers(ctx context.Context, limits Limits, matchers []*labels.Matcher) error { + tenants, err := tenant.TenantIDs(ctx) if err != nil { return err } @@ -718,7 +666,7 @@ func validateMatchers(req *http.Request, limits Limits, matchers []*labels.Match // Enforce RequiredLabels limit for _, tenant := range tenants { - required := limits.RequiredLabels(req.Context(), tenant) + required := limits.RequiredLabels(ctx, tenant) var missing []string for _, label := range required { if _, found := actual[label]; !found { @@ -735,7 +683,7 @@ func validateMatchers(req *http.Request, limits Limits, matchers []*labels.Match // The reason to enforce this one after RequiredLabels is to avoid users // from adding enough label matchers to pass the RequiredNumberLabels limit but then // having to modify them to use 
the ones required by RequiredLabels. - requiredNumberLabelsCapture := func(id string) int { return limits.RequiredNumberLabels(req.Context(), id) } + requiredNumberLabelsCapture := func(id string) int { return limits.RequiredNumberLabels(ctx, id) } if requiredNumberLabels := validation.SmallestPositiveNonZeroIntPerTenant(tenants, requiredNumberLabelsCapture); requiredNumberLabels > 0 { if len(present) < requiredNumberLabels { return fmt.Errorf(requiredNumberLabelsErrTmpl, strings.Join(present, ", "), len(present), requiredNumberLabels) diff --git a/pkg/querier/queryrange/limits/defitions.go b/pkg/querier/queryrange/limits/defitions.go new file mode 100644 index 0000000000000..bc8f7d0ec94bd --- /dev/null +++ b/pkg/querier/queryrange/limits/defitions.go @@ -0,0 +1,32 @@ +package limits + +import ( + "context" + "time" + + "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" +) + +// Limits extends the cortex limits interface with support for per tenant splitby parameters +// They've been extracted to avoid import cycles. +type Limits interface { + queryrangebase.Limits + logql.Limits + QuerySplitDuration(string) time.Duration + MaxQuerySeries(context.Context, string) int + MaxEntriesLimitPerQuery(context.Context, string) int + MinShardingLookback(string) time.Duration + // TSDBMaxQueryParallelism returns the limit to the number of split queries the + // frontend will process in parallel for TSDB queries. + TSDBMaxQueryParallelism(context.Context, string) int + // TSDBMaxBytesPerShard returns the limit to the number of bytes a single shard + TSDBMaxBytesPerShard(string) int + + RequiredLabels(context.Context, string) []string + RequiredNumberLabels(context.Context, string) int + MaxQueryBytesRead(context.Context, string) int + MaxQuerierBytesRead(context.Context, string) int + MaxStatsCacheFreshness(context.Context, string) time.Duration + VolumeEnabled(string) bool +} diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index cca9946f0c161..02c3862dd45a6 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -3,7 +3,6 @@ package queryrange import ( "context" "fmt" - "net/http" "sync" "testing" "time" @@ -17,11 +16,10 @@ import ( "gopkg.in/yaml.v2" "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/logqlmodel/stats" - "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" + "github.com/grafana/loki/pkg/logqlmodel" + base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/config" util_log "github.com/grafana/loki/pkg/util/log" - "github.com/grafana/loki/pkg/util/marshal" "github.com/grafana/loki/pkg/util/math" ) @@ -56,7 +54,7 @@ func Test_seriesLimiter(t *testing.T) { cfg.CacheIndexStatsResults = false // split in 7 with 2 in // max. 
l := WithSplitByLimits(fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, time.Hour) - tpw, stopper, err := NewTripperware(cfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{ + tpw, stopper, err := NewMiddleware(cfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{ Configs: testSchemas, }, nil, false, nil) if stopper != nil { @@ -75,28 +73,16 @@ func Test_seriesLimiter(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() count, h := promqlResult(matrix) - rt.setHandler(h) - - _, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.NoError(t, err) require.Equal(t, 7, *count) // 2 series should not be allowed. c := new(int) m := &sync.Mutex{} - h = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + h = base.HandlerFunc(func(_ context.Context, req base.Request) (base.Response, error) { m.Lock() defer m.Unlock() defer func() { @@ -104,52 +90,51 @@ func Test_seriesLimiter(t *testing.T) { }() // first time returns a single series if *c == 0 { - if err := marshal.WriteQueryResponseJSON(matrix, stats.Result{}, rw); err != nil { - panic(err) + params, err := ParamsFromRequest(req) + if err != nil { + return nil, err } - return + return ResultToResponse(logqlmodel.Result{Data: matrix}, params) } // second time returns a different series. - if err := marshal.WriteQueryResponseJSON( - promql.Matrix{ - { - Floats: []promql.FPoint{ - { - T: toMs(testTime.Add(-4 * time.Hour)), - F: 0.013333333333333334, - }, + m := promql.Matrix{ + { + Floats: []promql.FPoint{ + { + T: toMs(testTime.Add(-4 * time.Hour)), + F: 0.013333333333333334, }, - Metric: []labels.Label{ - { - Name: "filename", - Value: `/var/hostlog/apport.log`, - }, - { - Name: "job", - Value: "anotherjob", - }, + }, + Metric: []labels.Label{ + { + Name: "filename", + Value: `/var/hostlog/apport.log`, + }, + { + Name: "job", + Value: "anotherjob", }, }, }, - stats.Result{}, - rw); err != nil { - panic(err) } + params, err := ParamsFromRequest(req) + if err != nil { + return nil, err + } + return ResultToResponse(logqlmodel.Result{Data: m}, params) }) - rt.setHandler(h) - _, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Error(t, err) require.LessOrEqual(t, *c, 4) } func Test_MaxQueryParallelism(t *testing.T) { maxQueryParallelism := 2 - f, err := newfakeRoundTripper() - require.Nil(t, err) + var count atomic.Int32 var max atomic.Int32 - f.setHandler(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + h := base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { cur := count.Inc() if cur > max.Load() { max.Store(cur) @@ -157,16 +142,14 @@ func Test_MaxQueryParallelism(t *testing.T) { defer count.Dec() // simulate some work time.Sleep(20 * time.Millisecond) - })) + return base.NewEmptyPrometheusResponse(), nil + }) ctx := user.InjectOrgID(context.Background(), "foo") - r, err := http.NewRequestWithContext(ctx, "GET", "/query_range", http.NoBody) - require.Nil(t, err) - - _, _ = NewLimitedRoundTripper(f, DefaultCodec, fakeLimits{maxQueryParallelism: maxQueryParallelism}, + _, _ = NewLimitedRoundTripper(h, fakeLimits{maxQueryParallelism: maxQueryParallelism}, testSchemas, - queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) 
queryrangebase.Handler { - return queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + base.MiddlewareFunc(func(next base.Handler) base.Handler { + return base.HandlerFunc(func(c context.Context, r base.Request) (base.Response, error) { var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) @@ -179,58 +162,52 @@ func Test_MaxQueryParallelism(t *testing.T) { return nil, nil }) }), - ).RoundTrip(r) + ).Do(ctx, &LokiRequest{}) maxFound := int(max.Load()) require.LessOrEqual(t, maxFound, maxQueryParallelism, "max query parallelism: ", maxFound, " went over the configured one:", maxQueryParallelism) } func Test_MaxQueryParallelismLateScheduling(t *testing.T) { maxQueryParallelism := 2 - f, err := newfakeRoundTripper() - require.Nil(t, err) - f.setHandler(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + h := base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { // simulate some work time.Sleep(20 * time.Millisecond) - })) + return base.NewEmptyPrometheusResponse(), nil + }) ctx := user.InjectOrgID(context.Background(), "foo") - r, err := http.NewRequestWithContext(ctx, "GET", "/query_range", http.NoBody) - require.Nil(t, err) - - _, _ = NewLimitedRoundTripper(f, DefaultCodec, fakeLimits{maxQueryParallelism: maxQueryParallelism}, + _, err := NewLimitedRoundTripper(h, fakeLimits{maxQueryParallelism: maxQueryParallelism}, testSchemas, - queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { - return queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + base.MiddlewareFunc(func(next base.Handler) base.Handler { + return base.HandlerFunc(func(c context.Context, r base.Request) (base.Response, error) { for i := 0; i < 10; i++ { go func() { - _, _ = next.Do(c, &LokiRequest{}) + _, _ = next.Do(c, r) }() } return nil, nil }) }), - ).RoundTrip(r) + ).Do(ctx, &LokiRequest{}) + + require.NoError(t, err) } func Test_MaxQueryParallelismDisable(t *testing.T) { maxQueryParallelism := 0 - f, err := newfakeRoundTripper() - require.Nil(t, err) - f.setHandler(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + h := base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { // simulate some work time.Sleep(20 * time.Millisecond) - })) + return base.NewEmptyPrometheusResponse(), nil + }) ctx := user.InjectOrgID(context.Background(), "foo") - r, err := http.NewRequestWithContext(ctx, "GET", "/query_range", http.NoBody) - require.Nil(t, err) - - _, err = NewLimitedRoundTripper(f, DefaultCodec, fakeLimits{maxQueryParallelism: maxQueryParallelism}, + _, err := NewLimitedRoundTripper(h, fakeLimits{maxQueryParallelism: maxQueryParallelism}, testSchemas, - queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { - return queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + base.MiddlewareFunc(func(next base.Handler) base.Handler { + return base.HandlerFunc(func(c context.Context, r base.Request) (base.Response, error) { for i := 0; i < 10; i++ { go func() { _, _ = next.Do(c, &LokiRequest{}) @@ -239,12 +216,12 @@ func Test_MaxQueryParallelismDisable(t *testing.T) { return nil, nil }) }), - ).RoundTrip(r) + ).Do(ctx, &LokiRequest{}) require.Error(t, err) } func Test_MaxQueryLookBack(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{ + 
tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{ maxQueryLookback: 1 * time.Hour, maxQueryParallelism: 1, }, config.SchemaConfig{ @@ -254,9 +231,6 @@ func Test_MaxQueryLookBack(t *testing.T) { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := &LokiRequest{ Query: `{app="foo"} |= "foo"`, @@ -268,15 +242,17 @@ func Test_MaxQueryLookBack(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) + called := false + h := base.HandlerFunc(func(context.Context, base.Request) (base.Response, error) { + called = true + return nil, nil + }) - _, err = tpw(rt).RoundTrip(req) + resp, err := tpw.Wrap(h).Do(ctx, lreq) require.NoError(t, err) + require.False(t, called) + require.Equal(t, resp.(*LokiResponse).Status, "success") } func Test_GenerateCacheKey_NoDivideZero(t *testing.T) { @@ -436,19 +412,6 @@ func Test_WeightedParallelism_DivideByZeroError(t *testing.T) { }) } -func getFakeStatsHandler(retBytes uint64) (queryrangebase.Handler, *int, error) { - fakeRT, err := newfakeRoundTripper() - if err != nil { - return nil, nil, err - } - - count, statsHandler := indexStatsResult(logproto.IndexStatsResponse{Bytes: retBytes}) - - fakeRT.setHandler(statsHandler) - - return queryrangebase.NewRoundTripperHandler(fakeRT, DefaultCodec), count, nil -} - func Test_MaxQuerySize(t *testing.T) { const statsBytes = 1000 @@ -569,17 +532,11 @@ func Test_MaxQuerySize(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - queryStatsHandler, queryStatsHits, err := getFakeStatsHandler(uint64(statsBytes / math.Max(tc.expectedQueryStatsHits, 1))) - require.NoError(t, err) + queryStatsHits, queryStatsHandler := indexStatsResult(logproto.IndexStatsResponse{Bytes: uint64(statsBytes / math.Max(tc.expectedQueryStatsHits, 1))}) - querierStatsHandler, querierStatsHits, err := getFakeStatsHandler(uint64(statsBytes / math.Max(tc.expectedQuerierStatsHits, 1))) - require.NoError(t, err) - - fakeRT, err := newfakeRoundTripper() - require.NoError(t, err) + querierStatsHits, querierStatsHandler := indexStatsResult(logproto.IndexStatsResponse{Bytes: uint64(statsBytes / math.Max(tc.expectedQuerierStatsHits, 1))}) _, promHandler := promqlResult(matrix) - fakeRT.setHandler(promHandler) lokiReq := &LokiRequest{ Query: tc.query, @@ -591,19 +548,13 @@ func Test_MaxQuerySize(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "foo") - req, err := DefaultCodec.EncodeRequest(ctx, lokiReq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - middlewares := []queryrangebase.Middleware{ + middlewares := []base.Middleware{ NewQuerySizeLimiterMiddleware(schemas, testEngineOpts, util_log.Logger, tc.limits, queryStatsHandler), NewQuerierSizeLimiterMiddleware(schemas, testEngineOpts, util_log.Logger, tc.limits, querierStatsHandler), } - _, err = queryrangebase.NewRoundTripper(fakeRT, DefaultCodec, nil, middlewares...).RoundTrip(req) + _, err := base.MergeMiddlewares(middlewares...).Wrap(promHandler).Do(ctx, lokiReq) if tc.shouldErr { require.Error(t, err) @@ -627,7 +578,7 @@ func Test_MaxQuerySize_MaxLookBackPeriod(t *testing.T) { maxQuerierBytesRead: 1 << 10, } - statsHandler := queryrangebase.HandlerFunc(func(_ context.Context, req 
queryrangebase.Request) (queryrangebase.Response, error) { + statsHandler := base.HandlerFunc(func(_ context.Context, req base.Request) (base.Response, error) { // This is the actual check that we're testing. require.Equal(t, testTime.Add(-engineOpts.MaxLookBackPeriod).UnixMilli(), req.GetStart()) @@ -640,7 +591,7 @@ func Test_MaxQuerySize_MaxLookBackPeriod(t *testing.T) { for _, tc := range []struct { desc string - middleware queryrangebase.Middleware + middleware base.Middleware }{ { desc: "QuerySizeLimiter", @@ -661,7 +612,7 @@ func Test_MaxQuerySize_MaxLookBackPeriod(t *testing.T) { } handler := tc.middleware.Wrap( - queryrangebase.HandlerFunc(func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + base.HandlerFunc(func(_ context.Context, req base.Request) (base.Response, error) { return &LokiResponse{}, nil }), ) diff --git a/pkg/querier/queryrange/marshal.go b/pkg/querier/queryrange/marshal.go index 512cfb321b8f4..b177f5cf86324 100644 --- a/pkg/querier/queryrange/marshal.go +++ b/pkg/querier/queryrange/marshal.go @@ -112,6 +112,65 @@ func ResultToResponse(result logqlmodel.Result, params logql.Params) (queryrange return nil, fmt.Errorf("unsupported data type: %t", result.Data) } +func ResponseToResult(resp queryrangebase.Response) (logqlmodel.Result, error) { + switch r := resp.(type) { + case *LokiResponse: + if r.Error != "" { + return logqlmodel.Result{}, fmt.Errorf("%s: %s", r.ErrorType, r.Error) + } + + streams := make(logqlmodel.Streams, 0, len(r.Data.Result)) + + for _, stream := range r.Data.Result { + streams = append(streams, stream) + } + + return logqlmodel.Result{ + Statistics: r.Statistics, + Data: streams, + Headers: resp.GetHeaders(), + }, nil + + case *LokiPromResponse: + if r.Response.Error != "" { + return logqlmodel.Result{}, fmt.Errorf("%s: %s", r.Response.ErrorType, r.Response.Error) + } + if r.Response.Data.ResultType == loghttp.ResultTypeVector { + return logqlmodel.Result{ + Statistics: r.Statistics, + Data: sampleStreamToVector(r.Response.Data.Result), + Headers: resp.GetHeaders(), + }, nil + } + return logqlmodel.Result{ + Statistics: r.Statistics, + Data: sampleStreamToMatrix(r.Response.Data.Result), + Headers: resp.GetHeaders(), + }, nil + case *TopKSketchesResponse: + matrix, err := sketch.TopKMatrixFromProto(r.Response) + if err != nil { + return logqlmodel.Result{}, fmt.Errorf("cannot decode topk sketch: %w", err) + } + + return logqlmodel.Result{ + Data: matrix, + Headers: resp.GetHeaders(), + }, nil + case *QuantileSketchResponse: + matrix, err := sketch.QuantileSketchMatrixFromProto(r.Response) + if err != nil { + return logqlmodel.Result{}, fmt.Errorf("cannot decode quantile sketch: %w", err) + } + return logqlmodel.Result{ + Data: matrix, + Headers: resp.GetHeaders(), + }, nil + default: + return logqlmodel.Result{}, fmt.Errorf("cannot decode (%T)", resp) + } +} + func QueryResponseWrap(res queryrangebase.Response) (*QueryResponse, error) { p := &QueryResponse{} @@ -141,5 +200,4 @@ func QueryResponseWrap(res queryrangebase.Response) (*QueryResponse, error) { } return p, nil - } diff --git a/pkg/querier/queryrange/queryrangebase/roundtrip.go b/pkg/querier/queryrange/queryrangebase/roundtrip.go index 3cfb7ab849a8a..a2dc31be0bbc5 100644 --- a/pkg/querier/queryrange/queryrangebase/roundtrip.go +++ b/pkg/querier/queryrange/queryrangebase/roundtrip.go @@ -18,13 +18,9 @@ package queryrangebase import ( "context" "flag" - "io" "net/http" "time" - "github.com/grafana/dskit/httpgrpc" - 
"github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" "github.com/pkg/errors" ) @@ -116,79 +112,3 @@ type RoundTripFunc func(*http.Request) (*http.Response, error) func (f RoundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } - -type roundTripper struct { - roundTripperHandler - handler Handler - headers []string -} - -// NewRoundTripper merges a set of middlewares into an handler, then inject it into the `next` roundtripper -// using the codec to translate requests and responses. -func NewRoundTripper(next http.RoundTripper, codec Codec, headers []string, middlewares ...Middleware) http.RoundTripper { - transport := roundTripper{ - roundTripperHandler: roundTripperHandler{ - next: next, - codec: codec, - }, - headers: headers, - } - transport.handler = MergeMiddlewares(middlewares...).Wrap(&transport) - return transport -} - -func (q roundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - // include the headers specified in the roundTripper during decoding the request. - request, err := q.codec.DecodeRequest(r.Context(), r, q.headers) - if err != nil { - return nil, err - } - - if span := opentracing.SpanFromContext(r.Context()); span != nil { - request.LogToSpan(span) - } - - response, err := q.handler.Do(r.Context(), request) - if err != nil { - return nil, err - } - - return q.codec.EncodeResponse(r.Context(), r, response) -} - -type roundTripperHandler struct { - next http.RoundTripper - codec Codec -} - -// NewRoundTripperHandler returns a handler that translates Loki requests into http requests -// and passes down these to the next RoundTripper. -func NewRoundTripperHandler(next http.RoundTripper, codec Codec) Handler { - return roundTripperHandler{ - next: next, - codec: codec, - } -} - -// Do implements Handler. 
-func (q roundTripperHandler) Do(ctx context.Context, r Request) (Response, error) { - request, err := q.codec.EncodeRequest(ctx, r) - if err != nil { - return nil, err - } - - if err := user.InjectOrgIDIntoHTTPRequest(ctx, request); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - response, err := q.next.RoundTrip(request) - if err != nil { - return nil, err - } - defer func() { - _, _ = io.Copy(io.Discard, io.LimitReader(response.Body, 1024)) //nolint:errcheck - response.Body.Close() - }() - - return q.codec.DecodeResponse(ctx, response, r) -} diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index 038e0611f9362..b2af68b55b783 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -36,7 +36,6 @@ func NewQueryShardMiddleware( logger log.Logger, confs ShardingConfigs, engineOpts logql.EngineOpts, - _ queryrangebase.Codec, middlewareMetrics *queryrangebase.InstrumentMiddlewareMetrics, shardingMetrics *logql.MapperMetrics, limits Limits, diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index 1aa2b601057bb..e3e83f967ac04 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -410,7 +410,7 @@ func Test_InstantSharding(t *testing.T) { cpyPeriodConf.RowShards = 3 sharding := NewQueryShardMiddleware(log.NewNopLogger(), ShardingConfigs{ cpyPeriodConf, - }, testEngineOpts, DefaultCodec, queryrangebase.NewInstrumentMiddlewareMetrics(nil), + }, testEngineOpts, queryrangebase.NewInstrumentMiddlewareMetrics(nil), nilShardingMetrics, fakeLimits{ maxSeries: math.MaxInt32, diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 450feb4b286f4..9c409d14a5a9f 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -17,11 +17,11 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" - "github.com/grafana/loki/pkg/loghttp" + "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel/stats" - "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" + base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/config" logutil "github.com/grafana/loki/pkg/util/log" @@ -29,7 +29,7 @@ import ( // Config is the configuration for the queryrange tripperware type Config struct { - queryrangebase.Config `yaml:",inline"` + base.Config `yaml:",inline"` Transformer UserIDTransformer `yaml:"-"` CacheIndexStatsResults bool `yaml:"cache_index_stats_results"` StatsCacheConfig IndexStatsCacheConfig `yaml:"index_stats_results_cache" doc:"description=If a cache config is not specified and cache_index_stats_results is true, the config for the results cache is used."` @@ -76,7 +76,7 @@ func (s StopperWrapper) Stop() { } } -func newResultsCacheFromConfig(cfg queryrangebase.ResultsCacheConfig, registerer prometheus.Registerer, log log.Logger, cacheType stats.CacheType) (cache.Cache, error) { +func newResultsCacheFromConfig(cfg base.ResultsCacheConfig, registerer prometheus.Registerer, log log.Logger, cacheType stats.CacheType) (cache.Cache, error) { if 
!cache.IsCacheConfigured(cfg.CacheConfig) { return nil, errors.Errorf("%s cache is not configured", cacheType) } @@ -93,17 +93,17 @@ func newResultsCacheFromConfig(cfg queryrangebase.ResultsCacheConfig, registerer return c, nil } -// NewTripperware returns a Tripperware configured with middlewares to align, split and cache requests. -func NewTripperware( +// NewMiddleware returns a Middleware configured with middlewares to align, split and cache requests. +func NewMiddleware( cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, - cacheGenNumLoader queryrangebase.CacheGenNumberLoader, + cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, registerer prometheus.Registerer, -) (queryrangebase.Tripperware, Stopper, error) { +) (base.Middleware, Stopper, error) { metrics := NewMetrics(registerer) var ( @@ -148,7 +148,7 @@ func NewTripperware( } } - var codec queryrangebase.Codec = DefaultCodec + var codec base.Codec = DefaultCodec if cfg.RequiredQueryResponseFormat == "protobuf" { codec = &RequestProtobufCodec{} } @@ -165,7 +165,7 @@ func NewTripperware( return nil, nil, err } - limitedTripperware, err := NewLimitedTripperware(cfg, engineOpts, log, limits, schema, codec, metrics, indexStatsTripperware) + limitedTripperware, err := NewLimitedTripperware(cfg, engineOpts, log, limits, schema, metrics, indexStatsTripperware, codec) if err != nil { return nil, nil, err } @@ -177,7 +177,7 @@ func NewTripperware( return nil, nil, err } - seriesTripperware, err := NewSeriesTripperware(cfg, log, limits, codec, metrics, schema) + seriesTripperware, err := NewSeriesTripperware(cfg, log, limits, metrics, schema, DefaultCodec) if err != nil { return nil, nil, err } @@ -197,32 +197,32 @@ func NewTripperware( return nil, nil, err } - return func(next http.RoundTripper) http.RoundTripper { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { var ( - metricRT = metricsTripperware(next) - limitedRT = limitedTripperware(next) - logFilterRT = logFilterTripperware(next) - seriesRT = seriesTripperware(next) - labelsRT = labelsTripperware(next) - instantRT = instantMetricTripperware(next) - statsRT = indexStatsTripperware(next) - seriesVolumeRT = seriesVolumeTripperware(next) + metricRT = metricsTripperware.Wrap(next) + limitedRT = limitedTripperware.Wrap(next) + logFilterRT = logFilterTripperware.Wrap(next) + seriesRT = seriesTripperware.Wrap(next) + labelsRT = labelsTripperware.Wrap(next) + instantRT = instantMetricTripperware.Wrap(next) + statsRT = indexStatsTripperware.Wrap(next) + seriesVolumeRT = seriesVolumeTripperware.Wrap(next) ) return newRoundTripper(log, next, limitedRT, logFilterRT, metricRT, seriesRT, labelsRT, instantRT, statsRT, seriesVolumeRT, limits) - }, StopperWrapper{resultsCache, statsCache, volumeCache}, nil + }), StopperWrapper{resultsCache, statsCache, volumeCache}, nil } type roundTripper struct { logger log.Logger - next, limited, log, metric, series, labels, instantMetric, indexStats, seriesVolume http.RoundTripper + next, limited, log, metric, series, labels, instantMetric, indexStats, seriesVolume base.Handler limits Limits } // newRoundTripper creates a new queryrange roundtripper -func newRoundTripper(logger log.Logger, next, limited, log, metric, series, labels, instantMetric, indexStats, seriesVolume http.RoundTripper, limits Limits) roundTripper { +func newRoundTripper(logger log.Logger, next, limited, log, metric, series, labels, instantMetric, indexStats, seriesVolume base.Handler, limits Limits) 
roundTripper { return roundTripper{ logger: logger, limited: limited, @@ -238,26 +238,18 @@ func newRoundTripper(logger log.Logger, next, limited, log, metric, series, labe } } -func (r roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - logger := logutil.WithContext(req.Context(), r.logger) - err := req.ParseForm() - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } +func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, error) { + logger := logutil.WithContext(ctx, r.logger) - switch op := getOperation(req.URL.Path); op { - case QueryRangeOp: - rangeQuery, err := loghttp.ParseRangeQuery(req) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - expr, err := syntax.ParseExpr(rangeQuery.Query) + switch op := req.(type) { + case *LokiRequest: + expr, err := syntax.ParseExpr(op.Query) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - queryHash := logql.HashedQuery(rangeQuery.Query) - level.Info(logger).Log("msg", "executing query", "type", "range", "query", rangeQuery.Query, "length", rangeQuery.End.Sub(rangeQuery.Start), "step", rangeQuery.Step, "query_hash", queryHash) + queryHash := logql.HashedQuery(op.Query) + level.Info(logger).Log("msg", "executing query", "type", "range", "query", op.Query, "length", op.EndTs.Sub(op.StartTs), "step", op.Step, "query_hash", queryHash) switch e := expr.(type) { case syntax.SampleExpr: @@ -268,112 +260,70 @@ func (r roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { } for _, g := range groups { - if err := validateMatchers(req, r.limits, g.Matchers); err != nil { + if err := validateMatchers(ctx, r.limits, g.Matchers); err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } } - return r.metric.RoundTrip(req) + return r.metric.Do(ctx, req) case syntax.LogSelectorExpr: - // Note, this function can mutate the request - expr, err := transformRegexQuery(req, e) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - if err := validateMaxEntriesLimits(req, rangeQuery.Limit, r.limits); err != nil { + if err := validateMaxEntriesLimits(ctx, op.Limit, r.limits); err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - if err := validateMatchers(req, r.limits, e.Matchers()); err != nil { + if err := validateMatchers(ctx, r.limits, e.Matchers()); err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } // Only filter expressions are query sharded - if !expr.HasFilter() { - return r.limited.RoundTrip(req) + if !e.HasFilter() { + return r.limited.Do(ctx, req) } - return r.log.RoundTrip(req) + return r.log.Do(ctx, req) default: - return r.next.RoundTrip(req) - } - case SeriesOp: - sr, err := loghttp.ParseAndValidateSeriesQuery(req) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return r.next.Do(ctx, req) } + case *LokiSeriesRequest: + level.Info(logger).Log("msg", "executing query", "type", "series", "match", logql.PrintMatches(op.Match), "length", op.EndTs.Sub(op.StartTs)) - level.Info(logger).Log("msg", "executing query", "type", "series", "match", logql.PrintMatches(sr.Groups), "length", sr.End.Sub(sr.Start)) + return r.series.Do(ctx, req) + case *LabelRequest: + level.Info(logger).Log("msg", "executing query", "type", "labels", "label", op.Name, "length", op.LabelRequest.End.Sub(*op.LabelRequest.Start), "query", op.Query) - return r.series.RoundTrip(req) - 
case LabelNamesOp: - lr, err := loghttp.ParseLabelQuery(req) + return r.labels.Do(ctx, req) + case *LokiInstantRequest: + expr, err := syntax.ParseExpr(op.Query) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - level.Info(logger).Log("msg", "executing query", "type", "labels", "label", lr.Name, "length", lr.End.Sub(*lr.Start), "query", lr.Query) - - return r.labels.RoundTrip(req) - case InstantQueryOp: - instantQuery, err := loghttp.ParseInstantQuery(req) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - expr, err := syntax.ParseExpr(instantQuery.Query) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - queryHash := logql.HashedQuery(instantQuery.Query) - level.Info(logger).Log("msg", "executing query", "type", "instant", "query", instantQuery.Query, "query_hash", queryHash) + queryHash := logql.HashedQuery(op.Query) + level.Info(logger).Log("msg", "executing query", "type", "instant", "query", op.Query, "query_hash", queryHash) switch expr.(type) { case syntax.SampleExpr: - return r.instantMetric.RoundTrip(req) + return r.instantMetric.Do(ctx, req) default: - return r.next.RoundTrip(req) - } - case IndexStatsOp: - statsQuery, err := loghttp.ParseIndexStatsQuery(req) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return r.next.Do(ctx, req) } - level.Info(logger).Log("msg", "executing query", "type", "stats", "query", statsQuery.Query, "length", statsQuery.End.Sub(statsQuery.Start)) + case *logproto.IndexStatsRequest: + level.Info(logger).Log("msg", "executing query", "type", "stats", "query", op.Matchers, "length", op.Through.Sub(op.From)) - return r.indexStats.RoundTrip(req) - case VolumeOp: - volumeQuery, err := loghttp.ParseVolumeInstantQuery(req) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - level.Info(logger).Log( - "msg", "executing query", - "type", "volume", - "query", volumeQuery.Query, - "length", volumeQuery.End.Sub(volumeQuery.Start), - "limit", volumeQuery.Limit, - "aggregate_by", volumeQuery.AggregateBy, - ) - - return r.seriesVolume.RoundTrip(req) - case VolumeRangeOp: - volumeQuery, err := loghttp.ParseVolumeRangeQuery(req) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } + return r.indexStats.Do(ctx, req) + case *logproto.VolumeRequest: level.Info(logger).Log( "msg", "executing query", "type", "volume_range", - "query", volumeQuery.Query, - "length", volumeQuery.End.Sub(volumeQuery.Start), - "step", volumeQuery.Step, - "limit", volumeQuery.Limit, - "aggregate_by", volumeQuery.AggregateBy, + "query", op.Matchers, + "length", op.Through.Sub(op.From), + "step", op.Step, + "limit", op.Limit, + "aggregate_by", op.AggregateBy, ) - return r.seriesVolume.RoundTrip(req) + return r.seriesVolume.Do(ctx, req) default: - return r.next.RoundTrip(req) + return r.next.Do(ctx, req) } } @@ -434,20 +384,20 @@ func NewLogFilterTripperware( log log.Logger, limits Limits, schema config.SchemaConfig, - codec queryrangebase.Codec, + merger base.Merger, c cache.Cache, metrics *Metrics, - indexStatsTripperware queryrangebase.Tripperware, -) (queryrangebase.Tripperware, error) { - return func(next http.RoundTripper) http.RoundTripper { - statsHandler := queryrangebase.NewRoundTripperHandler(indexStatsTripperware(next), codec) + indexStatsTripperware base.Middleware, +) (base.Middleware, error) { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + 
statsHandler := indexStatsTripperware.Wrap(next) - queryRangeMiddleware := []queryrangebase.Middleware{ + queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(limits), NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), - queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, codec, splitByTime, metrics.SplitByMetrics), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), } if cfg.CacheResults { @@ -455,7 +405,7 @@ func NewLogFilterTripperware( log, limits, c, - func(_ context.Context, r queryrangebase.Request) bool { + func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, cfg.Transformer, @@ -463,7 +413,7 @@ func NewLogFilterTripperware( ) queryRangeMiddleware = append( queryRangeMiddleware, - queryrangebase.InstrumentMiddleware("log_results_cache", metrics.InstrumentMiddlewareMetrics), + base.InstrumentMiddleware("log_results_cache", metrics.InstrumentMiddlewareMetrics), queryCacheMiddleware, ) } @@ -474,7 +424,6 @@ func NewLogFilterTripperware( log, schema.Configs, engineOpts, - codec, metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware metrics.MiddlewareMapperMetrics.shardMapper, limits, @@ -492,16 +441,16 @@ func NewLogFilterTripperware( if cfg.MaxRetries > 0 { queryRangeMiddleware = append( - queryRangeMiddleware, queryrangebase.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), - queryrangebase.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), + queryRangeMiddleware, base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), ) } if len(queryRangeMiddleware) > 0 { - return NewLimitedRoundTripper(next, codec, limits, schema.Configs, queryRangeMiddleware...) + return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) } return next - }, nil + }), nil } // NewLimitedTripperware creates a new frontend tripperware responsible for handling log requests which are label matcher only, no filter expression. 
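The hunks in this file replace the http.RoundTripper-based Tripperware plumbing with the Handler/Middleware pair from queryrangebase, so requests stay as decoded queryrangebase.Request values instead of being re-encoded to HTTP between stages. A simplified, self-contained sketch of that composition model; Request and Response below are placeholder stand-ins for the real queryrangebase types:

package main

import (
	"context"
	"fmt"
)

type Request any
type Response any

type Handler interface {
	Do(ctx context.Context, r Request) (Response, error)
}

type HandlerFunc func(ctx context.Context, r Request) (Response, error)

func (f HandlerFunc) Do(ctx context.Context, r Request) (Response, error) { return f(ctx, r) }

type Middleware interface {
	Wrap(Handler) Handler
}

type MiddlewareFunc func(Handler) Handler

func (f MiddlewareFunc) Wrap(h Handler) Handler { return f(h) }

// MergeMiddlewares composes middlewares so the first one listed becomes
// the outermost wrapper, mirroring queryrangebase.MergeMiddlewares.
func MergeMiddlewares(mws ...Middleware) Middleware {
	return MiddlewareFunc(func(next Handler) Handler {
		for i := len(mws) - 1; i >= 0; i-- {
			next = mws[i].Wrap(next)
		}
		return next
	})
}

func main() {
	logging := MiddlewareFunc(func(next Handler) Handler {
		return HandlerFunc(func(ctx context.Context, r Request) (Response, error) {
			fmt.Println("enter middleware")
			defer fmt.Println("exit middleware")
			return next.Do(ctx, r)
		})
	})
	handler := MergeMiddlewares(logging).Wrap(
		HandlerFunc(func(context.Context, Request) (Response, error) { return "ok", nil }),
	)
	resp, err := handler.Do(context.Background(), nil)
	fmt.Println(resp, err)
}

Because the first middleware listed wraps outermost, chains in this file can put StatsCollectorMiddleware and NewLimitsMiddleware first so they observe and bound the whole request before splitting, sharding, and retries run further in.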
@@ -511,32 +460,32 @@ func NewLimitedTripperware( log log.Logger, limits Limits, schema config.SchemaConfig, - codec queryrangebase.Codec, metrics *Metrics, - indexStatsTripperware queryrangebase.Tripperware, -) (queryrangebase.Tripperware, error) { - return func(next http.RoundTripper) http.RoundTripper { - statsHandler := queryrangebase.NewRoundTripperHandler(indexStatsTripperware(next), codec) + indexStatsTripperware base.Middleware, + merger base.Merger, +) (base.Middleware, error) { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + statsHandler := indexStatsTripperware.Wrap(next) - queryRangeMiddleware := []queryrangebase.Middleware{ + queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(limits), NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), - queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), // Limited queries only need to fetch up to the requested line limit worth of logs, // Our defaults for splitting and parallelism are much too aggressive for large customers and result in // potentially GB of logs being returned by all the shards and splits which will overwhelm the frontend // Therefore we force max parallelism to one so that these queries are executed sequentially. // Below we also fix the number of shards to a static number. - SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, 1), codec, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, 1), merger, splitByTime, metrics.SplitByMetrics), NewQuerierSizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), } if len(queryRangeMiddleware) > 0 { - return NewLimitedRoundTripper(next, codec, limits, schema.Configs, queryRangeMiddleware...) + return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) } return next - }, nil + }), nil } // NewSeriesTripperware creates a new frontend tripperware responsible for handling series requests @@ -544,24 +493,24 @@ func NewSeriesTripperware( cfg Config, log log.Logger, limits Limits, - codec queryrangebase.Codec, metrics *Metrics, schema config.SchemaConfig, -) (queryrangebase.Tripperware, error) { - queryRangeMiddleware := []queryrangebase.Middleware{ + merger base.Merger, +) (base.Middleware, error) { + queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(limits), - queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), // The Series API needs to pull one chunk per series to extract the label set, which is much cheaper than iterating through all matching chunks. // Force a 24 hours split by for series API, this will be more efficient with our static daily bucket storage. // This would avoid queriers downloading chunks for same series over and over again for serving smaller queries. 
- SplitByIntervalMiddleware(schema.Configs, WithSplitByLimits(limits, 24*time.Hour), codec, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, WithSplitByLimits(limits, 24*time.Hour), merger, splitByTime, metrics.SplitByMetrics), } if cfg.MaxRetries > 0 { queryRangeMiddleware = append(queryRangeMiddleware, - queryrangebase.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), - queryrangebase.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), + base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), ) } @@ -573,17 +522,17 @@ func NewSeriesTripperware( metrics.InstrumentMiddlewareMetrics, metrics.MiddlewareMapperMetrics.shardMapper, limits, - codec, + merger, ), ) } - return func(next http.RoundTripper) http.RoundTripper { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { if len(queryRangeMiddleware) > 0 { - return NewLimitedRoundTripper(next, codec, limits, schema.Configs, queryRangeMiddleware...) + return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) } return next - }, nil + }), nil } // NewLabelsTripperware creates a new frontend tripperware responsible for handling labels requests. @@ -591,33 +540,33 @@ func NewLabelsTripperware( cfg Config, log log.Logger, limits Limits, - codec queryrangebase.Codec, + merger base.Merger, metrics *Metrics, schema config.SchemaConfig, -) (queryrangebase.Tripperware, error) { - queryRangeMiddleware := []queryrangebase.Middleware{ +) (base.Middleware, error) { + queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(limits), - queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), // Force a 24 hours split by for labels API, this will be more efficient with our static daily bucket storage. // This is because the labels API is an index-only operation. - SplitByIntervalMiddleware(schema.Configs, WithSplitByLimits(limits, 24*time.Hour), codec, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, WithSplitByLimits(limits, 24*time.Hour), merger, splitByTime, metrics.SplitByMetrics), } if cfg.MaxRetries > 0 { queryRangeMiddleware = append(queryRangeMiddleware, - queryrangebase.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), - queryrangebase.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), + base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), ) } - return func(next http.RoundTripper) http.RoundTripper { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { if len(queryRangeMiddleware) > 0 { // Do not forward any request header. - return queryrangebase.NewRoundTripper(next, codec, nil, queryRangeMiddleware...) 
+ return base.MergeMiddlewares(queryRangeMiddleware...).Wrap(next) } return next - }, nil + }), nil } // NewMetricTripperware creates a new frontend tripperware responsible for handling metric queries @@ -627,30 +576,30 @@ func NewMetricTripperware( log log.Logger, limits Limits, schema config.SchemaConfig, - codec queryrangebase.Codec, + merger base.Merger, c cache.Cache, - cacheGenNumLoader queryrangebase.CacheGenNumberLoader, + cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, - extractor queryrangebase.Extractor, + extractor base.Extractor, metrics *Metrics, - indexStatsTripperware queryrangebase.Tripperware, -) (queryrangebase.Tripperware, error) { + indexStatsTripperware base.Middleware, +) (base.Middleware, error) { cacheKey := cacheKeyLimits{limits, cfg.Transformer} - var queryCacheMiddleware queryrangebase.Middleware + var queryCacheMiddleware base.Middleware if cfg.CacheResults { var err error - queryCacheMiddleware, err = queryrangebase.NewResultsCacheMiddleware( + queryCacheMiddleware, err = base.NewResultsCacheMiddleware( log, c, cacheKey, limits, - codec, + merger, extractor, cacheGenNumLoader, - func(_ context.Context, r queryrangebase.Request) bool { + func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, - func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int { + func(ctx context.Context, tenantIDs []string, r base.Request) int { return MinWeightedParallelism( ctx, tenantIDs, @@ -668,10 +617,10 @@ func NewMetricTripperware( } } - return func(next http.RoundTripper) http.RoundTripper { - statsHandler := queryrangebase.NewRoundTripperHandler(indexStatsTripperware(next), codec) + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + statsHandler := indexStatsTripperware.Wrap(next) - queryRangeMiddleware := []queryrangebase.Middleware{ + queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(limits), } @@ -679,22 +628,22 @@ func NewMetricTripperware( if cfg.AlignQueriesWithStep { queryRangeMiddleware = append( queryRangeMiddleware, - queryrangebase.InstrumentMiddleware("step_align", metrics.InstrumentMiddlewareMetrics), - queryrangebase.StepAlignMiddleware, + base.InstrumentMiddleware("step_align", metrics.InstrumentMiddlewareMetrics), + base.StepAlignMiddleware, ) } queryRangeMiddleware = append( queryRangeMiddleware, NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), - queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, codec, splitMetricByTime, metrics.SplitByMetrics), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, splitMetricByTime, metrics.SplitByMetrics), ) if cfg.CacheResults { queryRangeMiddleware = append( queryRangeMiddleware, - queryrangebase.InstrumentMiddleware("results_cache", metrics.InstrumentMiddlewareMetrics), + base.InstrumentMiddleware("results_cache", metrics.InstrumentMiddlewareMetrics), queryCacheMiddleware, ) } @@ -705,7 +654,6 @@ func NewMetricTripperware( log, schema.Configs, engineOpts, - codec, metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware metrics.MiddlewareMapperMetrics.shardMapper, limits, @@ -724,23 +672,24 @@ func NewMetricTripperware( if cfg.MaxRetries > 0 { queryRangeMiddleware = append( queryRangeMiddleware, - 
queryrangebase.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), - queryrangebase.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), + base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), ) } // Finally, if the user selected any query range middleware, stitch it in. if len(queryRangeMiddleware) > 0 { - rt := NewLimitedRoundTripper(next, codec, limits, schema.Configs, queryRangeMiddleware...) - return queryrangebase.RoundTripFunc(func(r *http.Request) (*http.Response, error) { - if !strings.HasSuffix(r.URL.Path, "/query_range") { - return next.RoundTrip(r) + rt := NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) + return base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + _, ok := r.(*LokiRequest) + if !ok { + return next.Do(ctx, r) } - return rt.RoundTrip(r) + return rt.Do(ctx, r) }) } return next - }, nil + }), nil } // NewInstantMetricTripperware creates a new frontend tripperware responsible for handling metric queries @@ -750,14 +699,14 @@ func NewInstantMetricTripperware( log log.Logger, limits Limits, schema config.SchemaConfig, - codec queryrangebase.Codec, + merger base.Merger, metrics *Metrics, - indexStatsTripperware queryrangebase.Tripperware, -) (queryrangebase.Tripperware, error) { - return func(next http.RoundTripper) http.RoundTripper { - statsHandler := queryrangebase.NewRoundTripperHandler(indexStatsTripperware(next), codec) + indexStatsTripperware base.Middleware, +) (base.Middleware, error) { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + statsHandler := indexStatsTripperware.Wrap(next) - queryRangeMiddleware := []queryrangebase.Middleware{ + queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(limits), NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), @@ -770,7 +719,6 @@ func NewInstantMetricTripperware( log, schema.Configs, engineOpts, - codec, metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware metrics.MiddlewareMapperMetrics.shardMapper, limits, @@ -783,16 +731,16 @@ func NewInstantMetricTripperware( if cfg.MaxRetries > 0 { queryRangeMiddleware = append( queryRangeMiddleware, - queryrangebase.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), - queryrangebase.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), + base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), ) } if len(queryRangeMiddleware) > 0 { - return NewLimitedRoundTripper(next, codec, limits, schema.Configs, queryRangeMiddleware...) + return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) } return next - }, nil + }), nil } func NewVolumeTripperware( @@ -800,28 +748,28 @@ func NewVolumeTripperware( log log.Logger, limits Limits, schema config.SchemaConfig, - codec queryrangebase.Codec, + merger base.Merger, c cache.Cache, - cacheGenNumLoader queryrangebase.CacheGenNumberLoader, + cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, metrics *Metrics, -) (queryrangebase.Tripperware, error) { +) (base.Middleware, error) { // Parallelize the volume requests, so it doesn't send a huge request to a single index-gw (i.e. {app=~".+"} for 30d). 
// Indices are sharded by 24 hours, so we split the volume request in 24h intervals. limits = WithSplitByLimits(limits, 24*time.Hour) - var cacheMiddleware queryrangebase.Middleware + var cacheMiddleware base.Middleware if cfg.CacheVolumeResults { var err error cacheMiddleware, err = NewVolumeCacheMiddleware( log, limits, - codec, + merger, c, cacheGenNumLoader, - func(_ context.Context, r queryrangebase.Request) bool { + func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, - func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int { + func(ctx context.Context, tenantIDs []string, r base.Request) int { return MinWeightedParallelism( ctx, tenantIDs, @@ -843,7 +791,7 @@ func NewVolumeTripperware( indexTw, err := sharedIndexTripperware( cacheMiddleware, cfg, - codec, + merger, limits, log, metrics, @@ -855,47 +803,33 @@ func NewVolumeTripperware( } return volumeFeatureFlagRoundTripper( - volumeRangeTripperware(codec, indexTw), + volumeRangeTripperware(indexTw), limits, ), nil } -func volumeRangeTripperware(codec queryrangebase.Codec, nextTW queryrangebase.Tripperware) func(next http.RoundTripper) http.RoundTripper { - return func(next http.RoundTripper) http.RoundTripper { - nextRT := nextTW(next) - - return queryrangebase.RoundTripFunc(func(r *http.Request) (*http.Response, error) { - request, err := codec.DecodeRequest(r.Context(), r, nil) - if err != nil { - return nil, err - } - - seriesVolumeMiddlewares := []queryrangebase.Middleware{ +func volumeRangeTripperware(nextTW base.Middleware) base.Middleware { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + return base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + seriesVolumeMiddlewares := []base.Middleware{ StatsCollectorMiddleware(), NewVolumeMiddleware(), + nextTW, } // wrap nextRT with our new middleware - response, err := queryrangebase.MergeMiddlewares( + return base.MergeMiddlewares( seriesVolumeMiddlewares..., - ).Wrap( - VolumeDownstreamHandler(nextRT, codec), - ).Do(r.Context(), request) - - if err != nil { - return nil, err - } - - return codec.EncodeResponse(r.Context(), r, response) + ).Wrap(next).Do(ctx, r) }) - } + }) } -func volumeFeatureFlagRoundTripper(nextTW queryrangebase.Tripperware, limits Limits) func(next http.RoundTripper) http.RoundTripper { - return func(next http.RoundTripper) http.RoundTripper { - nextRt := nextTW(next) - return queryrangebase.RoundTripFunc(func(r *http.Request) (*http.Response, error) { - userID, err := user.ExtractOrgID(r.Context()) +func volumeFeatureFlagRoundTripper(nextTW base.Middleware, limits Limits) base.Middleware { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + nextRt := nextTW.Wrap(next) + return base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + userID, err := user.ExtractOrgID(ctx) if err != nil { return nil, err } @@ -904,9 +838,9 @@ func volumeFeatureFlagRoundTripper(nextTW queryrangebase.Tripperware, limits Lim return nil, httpgrpc.Errorf(http.StatusNotFound, "not found") } - return nextRt.RoundTrip(r) + return nextRt.Do(ctx, r) }) - } + }) } func NewIndexStatsTripperware( @@ -914,29 +848,29 @@ func NewIndexStatsTripperware( log log.Logger, limits Limits, schema config.SchemaConfig, - codec queryrangebase.Codec, + merger base.Merger, c cache.Cache, - cacheGenNumLoader queryrangebase.CacheGenNumberLoader, + cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, metrics *Metrics, -) 
(queryrangebase.Tripperware, error) { +) (base.Middleware, error) { // Parallelize the index stats requests, so it doesn't send a huge request to a single index-gw (i.e. {app=~".+"} for 30d). // Indices are sharded by 24 hours, so we split the stats request in 24h intervals. limits = WithSplitByLimits(limits, 24*time.Hour) - var cacheMiddleware queryrangebase.Middleware + var cacheMiddleware base.Middleware if cfg.CacheIndexStatsResults { var err error cacheMiddleware, err = NewIndexStatsCacheMiddleware( log, limits, - codec, + merger, c, cacheGenNumLoader, - func(_ context.Context, r queryrangebase.Request) bool { + func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, - func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int { + func(ctx context.Context, tenantIDs []string, r base.Request) int { return MinWeightedParallelism( ctx, tenantIDs, @@ -958,7 +892,7 @@ func NewIndexStatsTripperware( return sharedIndexTripperware( cacheMiddleware, cfg, - codec, + merger, limits, log, metrics, @@ -967,25 +901,25 @@ func NewIndexStatsTripperware( } func sharedIndexTripperware( - cacheMiddleware queryrangebase.Middleware, + cacheMiddleware base.Middleware, cfg Config, - codec queryrangebase.Codec, + merger base.Merger, limits Limits, log log.Logger, metrics *Metrics, schema config.SchemaConfig, -) (queryrangebase.Tripperware, error) { - return func(next http.RoundTripper) http.RoundTripper { - middlewares := []queryrangebase.Middleware{ +) (base.Middleware, error) { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + middlewares := []base.Middleware{ NewLimitsMiddleware(limits), - queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, codec, splitByTime, metrics.SplitByMetrics), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), } if cacheMiddleware != nil { middlewares = append( middlewares, - queryrangebase.InstrumentMiddleware("log_results_cache", metrics.InstrumentMiddlewareMetrics), + base.InstrumentMiddleware("log_results_cache", metrics.InstrumentMiddlewareMetrics), cacheMiddleware, ) } @@ -993,11 +927,11 @@ func sharedIndexTripperware( if cfg.MaxRetries > 0 { middlewares = append( middlewares, - queryrangebase.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), - queryrangebase.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), + base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), ) } - return queryrangebase.NewRoundTripper(next, codec, nil, middlewares...) 
- }, nil + return base.MergeMiddlewares(middlewares...).Wrap(next) + }), nil } diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index a06dc98e93f34..6c8f6d8af5943 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -1,22 +1,17 @@ package queryrange import ( - "bytes" "context" + "errors" "fmt" - "io" "math" "net/http" - "net/http/httptest" - "net/url" "sort" - "strconv" "sync" "testing" "time" "github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/user" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -30,12 +25,12 @@ import ( "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" - "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" + base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/config" + "github.com/grafana/loki/pkg/storage/stores/index/seriesvolume" "github.com/grafana/loki/pkg/util" util_log "github.com/grafana/loki/pkg/util/log" - "github.com/grafana/loki/pkg/util/marshal" "github.com/grafana/loki/pkg/util/validation" valid "github.com/grafana/loki/pkg/validation" ) @@ -43,11 +38,11 @@ import ( var ( testTime = time.Date(2019, 12, 2, 11, 10, 10, 10, time.UTC) testConfig = Config{ - Config: queryrangebase.Config{ + Config: base.Config{ AlignQueriesWithStep: true, MaxRetries: 3, CacheResults: true, - ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ + ResultsCacheConfig: base.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ Enabled: true, @@ -60,7 +55,7 @@ var ( Transformer: nil, CacheIndexStatsResults: true, StatsCacheConfig: IndexStatsCacheConfig{ - ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ + ResultsCacheConfig: base.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ Enabled: true, @@ -71,7 +66,7 @@ var ( }, }, VolumeCacheConfig: VolumeCacheConfig{ - ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ + ResultsCacheConfig: base.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ Enabled: true, @@ -152,19 +147,16 @@ var ( } ) -func getQueryAndStatsHandler(queryHandler, statsHandler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/loki/api/v1/index/stats" { - statsHandler.ServeHTTP(w, r) - return +func getQueryAndStatsHandler(queryHandler, statsHandler base.Handler) base.Handler { + return base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + switch r.(type) { + case *logproto.IndexStatsRequest: + return statsHandler.Do(ctx, r) + case *LokiRequest, *LokiInstantRequest: + return queryHandler.Do(ctx, r) } - if r.URL.Path == "/loki/api/v1/query_range" || r.URL.Path == "/loki/api/v1/query" { - queryHandler.ServeHTTP(w, r) - return - } - - panic("Request not supported") + return nil, fmt.Errorf("Request not supported: %T", r) }) } @@ -181,7 +173,7 @@ func TestMetricsTripperware(t *testing.T) { noCacheTestCfg := testConfig noCacheTestCfg.CacheResults = false noCacheTestCfg.CacheIndexStatsResults = false - tpw, stopper, err := 
NewTripperware(noCacheTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{ + tpw, stopper, err := NewMiddleware(noCacheTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{ Configs: testSchemasTSDB, }, nil, false, nil) if stopper != nil { @@ -200,20 +192,12 @@ func TestMetricsTripperware(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) // Test MaxQueryBytesRead limit statsCount, statsHandler := indexStatsResult(logproto.IndexStatsResponse{Bytes: 2000}) queryCount, queryHandler := counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + h := getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Error(t, err) require.Equal(t, 1, *statsCount) require.Equal(t, 0, *queryCount) @@ -221,28 +205,23 @@ func TestMetricsTripperware(t *testing.T) { // Test MaxQuerierBytesRead limit statsCount, statsHandler = indexStatsResult(logproto.IndexStatsResponse{Bytes: 200}) queryCount, queryHandler = counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Error(t, err) require.Equal(t, 0, *queryCount) require.Equal(t, 2, *statsCount) // testing retry _, statsHandler = indexStatsResult(logproto.IndexStatsResponse{Bytes: 10}) - retries, queryHandler := counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + retries, queryHandler := counterWithError(errors.New("handle error")) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) // 3 retries configured. require.GreaterOrEqual(t, *retries, 3) require.Error(t, err) - rt.Close() - - rt, err = newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() // Configure with cache - tpw, stopper, err = NewTripperware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{ + tpw, stopper, err = NewMiddleware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{ Configs: testSchemasTSDB, }, nil, false, nil) if stopper != nil { @@ -253,23 +232,19 @@ func TestMetricsTripperware(t *testing.T) { // testing split interval _, statsHandler = indexStatsResult(logproto.IndexStatsResponse{Bytes: 10}) count, queryHandler := promqlResult(matrix) - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - resp, err := tpw(rt).RoundTrip(req) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + lokiResponse, err := tpw.Wrap(h).Do(ctx, lreq) // 2 queries require.Equal(t, 2, *count) require.NoError(t, err) - lokiResponse, err := DefaultCodec.DecodeResponse(ctx, resp, lreq) - require.NoError(t, err) // testing cache count, queryHandler = counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - cacheResp, err := tpw(rt).RoundTrip(req) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + lokiCacheResponse, err := tpw.Wrap(h).Do(ctx, lreq) // 0 queries result are cached. 
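+	// (the previous request warmed the results cache for this range, so the query handler is never called)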
require.Equal(t, 0, *count) require.NoError(t, err) - lokiCacheResponse, err := DefaultCodec.DecodeResponse(ctx, cacheResp, lreq) - require.NoError(t, err) require.Equal(t, lokiResponse.(*LokiPromResponse).Response, lokiCacheResponse.(*LokiPromResponse).Response) } @@ -284,14 +259,11 @@ func TestLogFilterTripperware(t *testing.T) { noCacheTestCfg := testConfig noCacheTestCfg.CacheResults = false noCacheTestCfg.CacheIndexStatsResults = false - tpw, stopper, err := NewTripperware(noCacheTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil) + tpw, stopper, err := NewMiddleware(noCacheTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := &LokiRequest{ Query: `{app="foo"} |= "foo"`, @@ -303,38 +275,29 @@ func TestLogFilterTripperware(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) // testing limit count, h := promqlResult(streams) - rt.setHandler(h) - _, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Equal(t, 0, *count) require.Error(t, err) // set the query length back to normal lreq.StartTs = testTime.Add(-6 * time.Hour) - req, err = DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) // testing retry _, statsHandler := indexStatsResult(logproto.IndexStatsResponse{Bytes: 10}) - retries, queryHandler := counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + retries, queryHandler := counterWithError(errors.New("handler failed")) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.GreaterOrEqual(t, *retries, 3) require.Error(t, err) // Test MaxQueryBytesRead limit statsCount, statsHandler := indexStatsResult(logproto.IndexStatsResponse{Bytes: 2000}) queryCount, queryHandler := counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Error(t, err) require.Equal(t, 1, *statsCount) require.Equal(t, 0, *queryCount) @@ -342,8 +305,8 @@ func TestLogFilterTripperware(t *testing.T) { // Test MaxQuerierBytesRead limit statsCount, statsHandler = indexStatsResult(logproto.IndexStatsResponse{Bytes: 200}) queryCount, queryHandler = counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Error(t, err) require.Equal(t, 2, *statsCount) require.Equal(t, 0, *queryCount) @@ -362,14 +325,11 @@ func TestInstantQueryTripperware(t *testing.T) { queryTimeout: 1 * time.Minute, maxSeries: 1, } - tpw, stopper, err := NewTripperware(testShardingConfigNoCache, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testShardingConfigNoCache, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := 
newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := &LokiInstantRequest{ Query: `sum by (job) (bytes_rate({cluster="dev-us-central-0"}[15m]))`, @@ -380,18 +340,12 @@ func TestInstantQueryTripperware(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) // Test MaxQueryBytesRead limit statsCount, statsHandler := indexStatsResult(logproto.IndexStatsResponse{Bytes: 2000}) queryCount, queryHandler := counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + h := getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Error(t, err) require.Equal(t, 1, *statsCount) require.Equal(t, 0, *queryCount) @@ -399,33 +353,28 @@ func TestInstantQueryTripperware(t *testing.T) { // Test MaxQuerierBytesRead limit statsCount, statsHandler = indexStatsResult(logproto.IndexStatsResponse{Bytes: 200}) queryCount, queryHandler = counter() - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - _, err = tpw(rt).RoundTrip(req) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Error(t, err) require.Equal(t, 2, *statsCount) require.Equal(t, 0, *queryCount) count, queryHandler := promqlResult(vector) _, statsHandler = indexStatsResult(logproto.IndexStatsResponse{Bytes: 10}) - rt.setHandler(getQueryAndStatsHandler(queryHandler, statsHandler)) - resp, err := tpw(rt).RoundTrip(req) + h = getQueryAndStatsHandler(queryHandler, statsHandler) + lokiResponse, err := tpw.Wrap(h).Do(ctx, lreq) require.Equal(t, 1, *count) require.NoError(t, err) - lokiResponse, err := DefaultCodec.DecodeResponse(ctx, resp, lreq) - require.NoError(t, err) require.IsType(t, &LokiPromResponse{}, lokiResponse) } func TestSeriesTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := &LokiSeriesRequest{ Match: []string{`{job="varlogs"}`}, @@ -435,22 +384,15 @@ func TestSeriesTripperware(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) + count, h := seriesResult(series) + lokiSeriesResponse, err := tpw.Wrap(h).Do(ctx, lreq) require.NoError(t, err) - count, h := seriesResult(series) - rt.setHandler(h) - resp, err := tpw(rt).RoundTrip(req) // 2 queries require.Equal(t, 2, *count) - require.NoError(t, err) - lokiSeriesResponse, err := DefaultCodec.DecodeResponse(ctx, resp, lreq) res, ok := lokiSeriesResponse.(*LokiSeriesResponse) - require.Equal(t, true, ok) + require.True(t, ok) // make sure we return unique series since responses from // SplitByInterval middleware might have duplicate series @@ -459,14 +401,11 @@ func TestSeriesTripperware(t *testing.T) { } func 
TestLabelsTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := NewLabelRequest( testTime.Add(-25*time.Hour), // bigger than the limit @@ -477,44 +416,42 @@ func TestLabelsTripperware(t *testing.T) { ) ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) handler := newFakeHandler( // we expect 2 calls. - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.NoError(t, marshal.WriteLabelResponseJSON([]string{"foo", "bar", "blop"}, w)) + base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { + return &LokiLabelNamesResponse{ + Status: "success", + Data: []string{"foo", "bar", "blop"}, + Version: uint32(1), + }, nil }), - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.NoError(t, marshal.WriteLabelResponseJSON([]string{"foo", "bar", "blip"}, w)) + base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { + return &LokiLabelNamesResponse{ + Status: "success", + Data: []string{"foo", "bar", "blip"}, + Version: uint32(1), + }, nil }), ) - rt.setHandler(handler) - resp, err := tpw(rt).RoundTrip(req) + lokiLabelsResponse, err := tpw.Wrap(handler).Do(ctx, lreq) + require.NoError(t, err) + // verify 2 calls have been made to downstream. 
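+	// (the 25h label query is wider than the forced 24h split interval, so it fans out into two downstream requests)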
require.Equal(t, 2, handler.count) - require.NoError(t, err) - lokiLabelsResponse, err := DefaultCodec.DecodeResponse(ctx, resp, lreq) res, ok := lokiLabelsResponse.(*LokiLabelNamesResponse) - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, []string{"foo", "bar", "blop", "blip"}, res.Data) require.Equal(t, "success", res.Status) require.NoError(t, err) } func TestIndexStatsTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := &logproto.IndexStatsRequest{ Matchers: `{job="varlogs"}`, @@ -523,12 +460,6 @@ func TestIndexStatsTripperware(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) response := logproto.IndexStatsResponse{ Streams: 100, @@ -538,8 +469,7 @@ func TestIndexStatsTripperware(t *testing.T) { } count, h := indexStatsResult(response) - rt.setHandler(h) - _, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) // 2 queries require.Equal(t, 2, *count) require.NoError(t, err) @@ -547,14 +477,11 @@ func TestIndexStatsTripperware(t *testing.T) { // Test the cache. // It should have the answer already so the query handler shouldn't be hit count, h = indexStatsResult(response) - rt.setHandler(h) - resp, err := tpw(rt).RoundTrip(req) + indexStatsResponse, err := tpw.Wrap(h).Do(ctx, lreq) require.NoError(t, err) require.Equal(t, 0, *count) // Test the response is the expected - indexStatsResponse, err := DefaultCodec.DecodeResponse(ctx, resp, lreq) - require.NoError(t, err) res, ok := indexStatsResponse.(*IndexStatsResponse) require.Equal(t, true, ok) require.Equal(t, response.Streams*2, res.Response.Streams) @@ -565,47 +492,37 @@ func TestIndexStatsTripperware(t *testing.T) { func TestVolumeTripperware(t *testing.T) { t.Run("instant queries hardcode step to 0 and return a prometheus style vector response", func(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, volumeEnabled: true}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + limits := fakeLimits{ + maxQueryLength: 48 * time.Hour, + volumeEnabled: true, + maxSeries: 42, + } + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() - lreq := &logproto.VolumeRequest{ - Matchers: `{job="varlogs"}`, - From: model.TimeFromUnixNano(testTime.Add(-25 * time.Hour).UnixNano()), // bigger than split by interval limit - Through: model.TimeFromUnixNano(testTime.UnixNano()), - Limit: 10, - Step: 42, // this should be ignored and set to 0 + Matchers: `{job="varlogs"}`, + From: model.TimeFromUnixNano(testTime.Add(-25 * time.Hour).UnixNano()), 
// bigger than split by interval limit + Through: model.TimeFromUnixNano(testTime.UnixNano()), + Limit: 10, + Step: 0, // Travis/Trevor: this should be ignored and set to 0. Karsten: Why? + AggregateBy: seriesvolume.DefaultAggregateBy, } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - - req.URL.Path = "/loki/api/v1/index/volume" count, h := seriesVolumeResult(seriesVolume) - rt.setHandler(h) - resp, err := tpw(rt).RoundTrip(req) + volumeResp, err := tpw.Wrap(h).Do(ctx, lreq) require.NoError(t, err) require.Equal(t, 2, *count) // 2 queries from splitting - volumeResp, err := DefaultCodec.DecodeResponse(ctx, resp, nil) - require.NoError(t, err) - - expected := queryrangebase.PrometheusData{ + expected := base.PrometheusData{ ResultType: loghttp.ResultTypeVector, - Result: []queryrangebase.SampleStream{ + Result: []base.SampleStream{ { Labels: []logproto.LabelAdapter{{ Name: "bar", @@ -636,41 +553,29 @@ func TestVolumeTripperware(t *testing.T) { }) t.Run("range queries return a prometheus style metrics response, putting volumes in buckets based on the step", func(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, volumeEnabled: true}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, volumeEnabled: true}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() - start := testTime.Add(-5 * time.Hour) end := testTime lreq := &logproto.VolumeRequest{ - Matchers: `{job="varlogs"}`, - From: model.TimeFromUnixNano(start.UnixNano()), // bigger than split by interval limit - Through: model.TimeFromUnixNano(end.UnixNano()), - Step: time.Hour.Milliseconds(), - Limit: 10, + Matchers: `{job="varlogs"}`, + From: model.TimeFromUnixNano(start.UnixNano()), // bigger than split by interval limit + Through: model.TimeFromUnixNano(end.UnixNano()), + Step: time.Hour.Milliseconds(), + Limit: 10, + AggregateBy: seriesvolume.DefaultAggregateBy, } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - - req.URL.Path = "/loki/api/v1/index/volume_range" count, h := seriesVolumeResult(seriesVolume) - rt.setHandler(h) - resp, err := tpw(rt).RoundTrip(req) + volumeResp, err := tpw.Wrap(h).Do(ctx, lreq) require.NoError(t, err) /* @@ -680,9 +585,6 @@ func TestVolumeTripperware(t *testing.T) { */ require.Equal(t, 6, *count) // 6 queries from splitting into step buckets - volumeResp, err := DefaultCodec.DecodeResponse(ctx, resp, nil) - require.NoError(t, err) - barBazExpectedSamples := []logproto.LegacySample{} util.ForInterval(time.Hour, start, end, true, func(s, _ time.Time) { barBazExpectedSamples = append(barBazExpectedSamples, logproto.LegacySample{ @@ -705,9 +607,9 @@ func TestVolumeTripperware(t *testing.T) { return fooBarExpectedSamples[i].TimestampMs < fooBarExpectedSamples[j].TimestampMs }) - expected := queryrangebase.PrometheusData{ + expected := base.PrometheusData{ 
ResultType: loghttp.ResultTypeMatrix, - Result: []queryrangebase.SampleStream{ + Result: []base.SampleStream{ { Labels: []logproto.LabelAdapter{{ Name: "bar", @@ -742,7 +644,7 @@ func TestNewTripperware_Caches(t *testing.T) { { name: "results cache disabled, stats cache disabled", config: Config{ - Config: queryrangebase.Config{ + Config: base.Config{ CacheResults: false, }, CacheIndexStatsResults: false, @@ -753,9 +655,9 @@ func TestNewTripperware_Caches(t *testing.T) { { name: "results cache enabled, stats cache disabled", config: Config{ - Config: queryrangebase.Config{ + Config: base.Config{ CacheResults: true, - ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ + ResultsCacheConfig: base.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ MaxSizeMB: 1, @@ -772,9 +674,9 @@ func TestNewTripperware_Caches(t *testing.T) { { name: "results cache enabled, stats cache enabled", config: Config{ - Config: queryrangebase.Config{ + Config: base.Config{ CacheResults: true, - ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ + ResultsCacheConfig: base.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ MaxSizeMB: 1, @@ -791,9 +693,9 @@ func TestNewTripperware_Caches(t *testing.T) { { name: "results cache enabled, stats cache enabled but different", config: Config{ - Config: queryrangebase.Config{ + Config: base.Config{ CacheResults: true, - ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ + ResultsCacheConfig: base.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ Enabled: true, @@ -804,7 +706,7 @@ func TestNewTripperware_Caches(t *testing.T) { }, CacheIndexStatsResults: true, StatsCacheConfig: IndexStatsCacheConfig{ - ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ + ResultsCacheConfig: base.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ Enabled: true, @@ -820,7 +722,7 @@ func TestNewTripperware_Caches(t *testing.T) { { name: "results cache enabled (no config provided)", config: Config{ - Config: queryrangebase.Config{ + Config: base.Config{ CacheResults: true, }, }, @@ -829,7 +731,7 @@ func TestNewTripperware_Caches(t *testing.T) { { name: "results cache disabled, stats cache enabled (no config provided)", config: Config{ - Config: queryrangebase.Config{ + Config: base.Config{ CacheResults: false, }, CacheIndexStatsResults: true, @@ -839,7 +741,7 @@ func TestNewTripperware_Caches(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - _, stopper, err := NewTripperware(tc.config, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + _, stopper, err := NewMiddleware(tc.config, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } @@ -869,14 +771,11 @@ func TestNewTripperware_Caches(t *testing.T) { } func TestLogNoFilter(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := 
newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := &LokiRequest{ Query: `{app="foo"}`, @@ -888,126 +787,44 @@ func TestLogNoFilter(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) count, h := promqlResult(streams) - rt.setHandler(h) - _, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Equal(t, 1, *count) require.Nil(t, err) } -func TestRegexpParamsSupport(t *testing.T) { - l := WithSplitByLimits(fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, 4*time.Hour) - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) - if stopper != nil { - defer stopper.Stop() - } - require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() - - lreq := &LokiRequest{ - Query: `{app="foo"}`, - Limit: 1000, - StartTs: testTime.Add(-6 * time.Hour), - EndTs: testTime, - Direction: logproto.FORWARD, - Path: "/loki/api/v1/query_range", - } - - ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - // fudge a regexp params - params := req.URL.Query() - params.Set("regexp", "foo") - req.URL.RawQuery = params.Encode() - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - - count, h := promqlResult(streams) - rt.setHandler(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - // the query params should contain the filter. - require.Contains(t, r.URL.Query().Get("query"), `|~ "foo"`) - h.ServeHTTP(rw, r) - })) - _, err = tpw(rt).RoundTrip(req) - require.Equal(t, 2, *count) // expecting the query to also be splitted since it has a filter. 
- require.NoError(t, err) -} - func TestPostQueries(t *testing.T) { - req, err := http.NewRequest(http.MethodPost, "/loki/api/v1/query_range", nil) - data := url.Values{ - "query": {`{app="foo"} |~ "foo"`}, - } - body := bytes.NewBufferString(data.Encode()) - req.Body = io.NopCloser(body) - req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode()))) - req = req.WithContext(user.InjectOrgID(context.Background(), "1")) - require.NoError(t, err) - _, err = newRoundTripper( + lreq := &LokiRequest{Query: `{app="foo"} |~ "foo"`} + ctx := user.InjectOrgID(context.Background(), "1") + handler := base.HandlerFunc(func(context.Context, base.Request) (base.Response, error) { + t.Error("unexpected default roundtripper called") + return nil, nil + }) + _, err := newRoundTripper( util_log.Logger, - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected default roundtripper called") - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected default roundtripper called") - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected metric roundtripper called") - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected series roundtripper called") - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected labels roundtripper called") - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected instant roundtripper called") - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected indexStats roundtripper called") - return nil, nil - }), - queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { - t.Error("unexpected labelVolume roundtripper called") + handler, + handler, + base.HandlerFunc(func(context.Context, base.Request) (base.Response, error) { return nil, nil }), + handler, + handler, + handler, + handler, + handler, + handler, fakeLimits{}, - ).RoundTrip(req) + ).Do(ctx, lreq) require.NoError(t, err) } func TestTripperware_EntriesLimit(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() lreq := &LokiRequest{ Query: `{app="foo"}`, @@ -1019,15 +836,16 @@ func TestTripperware_EntriesLimit(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) + called := false + h := base.HandlerFunc(func(context.Context, base.Request) (base.Response, error) { + called = true + return nil, nil + }) - 
_, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "max entries limit per query exceeded, limit > max_entries_limit (10000 > 5000)"), err) + require.False(t, called) } func TestTripperware_RequiredLabels(t *testing.T) { @@ -1048,16 +866,12 @@ func TestTripperware_RequiredLabels(t *testing.T) { } { t.Run(test.qs, func(t *testing.T) { limits := fakeLimits{maxEntriesLimitPerQuery: 5000, maxQueryParallelism: 1, requiredLabels: []string{"app"}} - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() _, h := promqlResult(test.response) - rt.setHandler(h) lreq := &LokiRequest{ Query: test.qs, @@ -1067,16 +881,13 @@ func TestTripperware_RequiredLabels(t *testing.T) { Direction: logproto.FORWARD, Path: "/loki/api/v1/query_range", } + // See loghttp.step + step := time.Duration(int(math.Max(math.Floor(lreq.EndTs.Sub(lreq.StartTs).Seconds()/250), 1))) * time.Second + lreq.Step = step.Milliseconds() ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - _, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) if test.expectedError != "" { require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, test.expectedError), err) } else { @@ -1159,17 +970,13 @@ func TestTripperware_RequiredNumberLabels(t *testing.T) { maxQueryParallelism: 1, requiredNumberLabels: tc.requiredNumberLabels, } - tpw, stopper, err := NewTripperware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() _, h := promqlResult(tc.response) - rt.setHandler(h) lreq := &LokiRequest{ Query: tc.query, @@ -1179,16 +986,13 @@ func TestTripperware_RequiredNumberLabels(t *testing.T) { Direction: logproto.FORWARD, Path: "/loki/api/v1/query_range", } + // See loghttp.step + step := time.Duration(int(math.Max(math.Floor(lreq.EndTs.Sub(lreq.StartTs).Seconds()/250), 1))) * time.Second + lreq.Step = step.Milliseconds() ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, lreq) - require.NoError(t, err) - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - - _, err = tpw(rt).RoundTrip(req) + _, err = tpw.Wrap(h).Do(ctx, lreq) if tc.expectedError != noErr { require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, tc.expectedError), err) } else { @@ -1284,7 +1088,7 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { for _, tc := range []struct { name string - request queryrangebase.Request + request base.Request expectedSplitStats int64 expectedShardStats int64 }{ @@ -1342,30 +1146,16 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { }, } { 
t.Run(tc.name, func(t *testing.T) { - tpw, stopper, err := NewTripperware(statsTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: statsSchemas}, nil, false, nil) + tpw, stopper, err := NewMiddleware(statsTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: statsSchemas}, nil, false, nil) if stopper != nil { defer stopper.Stop() } require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), "1") - req, err := DefaultCodec.EncodeRequest(ctx, tc.request) - require.NoError(t, err) - - req = req.WithContext(ctx) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - require.NoError(t, err) - - rt, err := newfakeRoundTripper() - require.NoError(t, err) - defer rt.Close() _, h := promqlResult(matrix) - rt.setHandler(h) - resp, err := tpw(rt).RoundTrip(req) - require.NoError(t, err) - - lokiResponse, err := DefaultCodec.DecodeResponse(ctx, resp, tc.request) + lokiResponse, err := tpw.Wrap(h).Do(ctx, tc.request) require.NoError(t, err) require.Equal(t, tc.expectedSplitStats, lokiResponse.(*LokiPromResponse).Statistics.Summary.Splits) @@ -1474,110 +1264,97 @@ func (f fakeLimits) TSDBMaxBytesPerShard(_ string) int { return valid.DefaultTSDBMaxBytesPerShard } -func counter() (*int, http.Handler) { +func counter() (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return &count, base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() count++ + return base.NewEmptyPrometheusResponse(), nil }) } -func promqlResult(v parser.Value) (*int, http.Handler) { +func counterWithError(err error) (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return &count, base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() - if err := marshal.WriteQueryResponseJSON(v, stats.Result{}, w); err != nil { - panic(err) - } count++ + return nil, err }) } -func seriesResult(v logproto.SeriesResponse) (*int, http.Handler) { +func promqlResult(v parser.Value) (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return &count, base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() - if err := marshal.WriteSeriesResponseJSON(v.GetSeries(), w); err != nil { - panic(err) + count++ + params, err := ParamsFromRequest(r) + if err != nil { + return nil, err } + result := logqlmodel.Result{Data: v} + return ResultToResponse(result, params) + }) +} + +func seriesResult(v logproto.SeriesResponse) (*int, base.Handler) { + count := 0 + var lock sync.Mutex + return &count, base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + lock.Lock() + defer lock.Unlock() count++ + return &LokiSeriesResponse{ + Status: "success", + Version: 1, + Data: v.Series, + }, nil }) } -func indexStatsResult(v logproto.IndexStatsResponse) (*int, http.Handler) { +func indexStatsResult(v logproto.IndexStatsResponse) (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return &count, base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() - if err := marshal.WriteIndexStatsResponseJSON(&v, w); err != nil { - panic(err) - } 
count++ + return &IndexStatsResponse{Response: &v}, nil }) } -func seriesVolumeResult(v logproto.VolumeResponse) (*int, http.Handler) { +func seriesVolumeResult(v logproto.VolumeResponse) (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return &count, base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() - if err := marshal.WriteVolumeResponseJSON(&v, w); err != nil { - panic(err) - } count++ + return &VolumeResponse{Response: &v}, nil }) } type fakeHandler struct { count int lock sync.Mutex - calls []http.Handler + calls []base.Handler } -func newFakeHandler(calls ...http.Handler) *fakeHandler { +func newFakeHandler(calls ...base.Handler) *fakeHandler { return &fakeHandler{calls: calls} } -func (f *fakeHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { +func (f *fakeHandler) Do(ctx context.Context, req base.Request) (base.Response, error) { f.lock.Lock() defer f.lock.Unlock() - f.calls[f.count].ServeHTTP(w, req) + r, err := f.calls[f.count].Do(ctx, req) f.count++ -} - -type fakeRoundTripper struct { - *httptest.Server - host string -} - -func newfakeRoundTripper() (*fakeRoundTripper, error) { - s := httptest.NewServer(nil) - u, err := url.Parse(s.URL) - if err != nil { - return nil, err - } - return &fakeRoundTripper{ - Server: s, - host: u.Host, - }, nil -} - -func (s *fakeRoundTripper) setHandler(h http.Handler) { - s.Config.Handler = middleware.AuthenticateUser.Wrap(h) -} - -func (s fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - r.URL.Scheme = "http" - r.URL.Host = s.host - return http.DefaultTransport.RoundTrip(r) + return r, err } func toMs(t time.Time) int64 { diff --git a/pkg/querier/queryrange/volume.go b/pkg/querier/queryrange/volume.go index 44b88a1d907ca..305397ff6d6e0 100644 --- a/pkg/querier/queryrange/volume.go +++ b/pkg/querier/queryrange/volume.go @@ -2,13 +2,10 @@ package queryrange import ( "context" - "net/http" "sort" "time" "github.com/grafana/dskit/concurrency" - "github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/user" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -22,26 +19,6 @@ import ( "github.com/grafana/loki/pkg/util" ) -func VolumeDownstreamHandler(nextRT http.RoundTripper, codec queryrangebase.Codec) queryrangebase.Handler { - return queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { - request, err := codec.EncodeRequest(ctx, req) - if err != nil { - return nil, err - } - - if err := user.InjectOrgIDIntoHTTPRequest(ctx, request); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - resp, err := nextRT.RoundTrip(request) - if err != nil { - return nil, err - } - - return codec.DecodeResponse(ctx, resp, req) - }) -} - func NewVolumeMiddleware() queryrangebase.Middleware { return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { return queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { diff --git a/pkg/util/limiter/combined_limits.go b/pkg/util/limiter/combined_limits.go index 59f0b6dec3a49..40d6fd508a4d4 100644 --- a/pkg/util/limiter/combined_limits.go +++ b/pkg/util/limiter/combined_limits.go @@ -5,8 +5,8 @@ import ( "github.com/grafana/loki/pkg/compactor" 
"github.com/grafana/loki/pkg/distributor" "github.com/grafana/loki/pkg/ingester" - "github.com/grafana/loki/pkg/querier" - "github.com/grafana/loki/pkg/querier/queryrange" + querier_limits "github.com/grafana/loki/pkg/querier/limits" + queryrange_limits "github.com/grafana/loki/pkg/querier/queryrange/limits" "github.com/grafana/loki/pkg/ruler" "github.com/grafana/loki/pkg/scheduler" "github.com/grafana/loki/pkg/storage" @@ -17,8 +17,8 @@ type CombinedLimits interface { compactor.Limits distributor.Limits ingester.Limits - querier.Limits - queryrange.Limits + querier_limits.Limits + queryrange_limits.Limits ruler.RulesLimits scheduler.Limits storage.StoreLimits diff --git a/pkg/util/querylimits/propagation.go b/pkg/util/querylimits/propagation.go index 75cce84e870dd..f0e5fbc8f6b49 100644 --- a/pkg/util/querylimits/propagation.go +++ b/pkg/util/querylimits/propagation.go @@ -44,14 +44,19 @@ func MarshalQueryLimits(limits *QueryLimits) ([]byte, error) { // InjectQueryLimitsHTTP adds the query limits to the request headers. func InjectQueryLimitsHTTP(r *http.Request, limits *QueryLimits) error { + return InjectQueryLimitsHeader(&r.Header, limits) +} + +// InjectQueryLimitsHeader adds the query limits to the headers. +func InjectQueryLimitsHeader(h *http.Header, limits *QueryLimits) error { // Ensure any existing policy sets are erased - r.Header.Del(HTTPHeaderQueryLimitsKey) + h.Del(HTTPHeaderQueryLimitsKey) encodedLimits, err := MarshalQueryLimits(limits) if err != nil { return err } - r.Header.Add(HTTPHeaderQueryLimitsKey, string(encodedLimits)) + h.Add(HTTPHeaderQueryLimitsKey, string(encodedLimits)) return nil } diff --git a/pkg/util/querylimits/tripperware.go b/pkg/util/querylimits/tripperware.go deleted file mode 100644 index a7608b98951b7..0000000000000 --- a/pkg/util/querylimits/tripperware.go +++ /dev/null @@ -1,51 +0,0 @@ -package querylimits - -import ( - "net/http" - - "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" -) - -type tripperwareWrapper struct { - next http.RoundTripper - wrapped http.RoundTripper -} - -// WrapTripperware wraps the existing tripperware to make sure the query limit policy headers are propagated -func WrapTripperware(existing queryrangebase.Tripperware) queryrangebase.Tripperware { - return func(next http.RoundTripper) http.RoundTripper { - limitsTrw := &tripperwareWrapper{ - next: next, - } - limitsTrw.wrapped = existing(queryrangebase.RoundTripFunc(limitsTrw.PostWrappedRoundTrip)) - return limitsTrw - } -} - -func (t *tripperwareWrapper) RoundTrip(r *http.Request) (*http.Response, error) { - ctx := r.Context() - - limits := ExtractQueryLimitsContext(ctx) - - if limits != nil { - ctx = InjectQueryLimitsContext(ctx, *limits) - r = r.Clone(ctx) - } - - return t.wrapped.RoundTrip(r) -} - -func (t *tripperwareWrapper) PostWrappedRoundTrip(r *http.Request) (*http.Response, error) { - ctx := r.Context() - - limits := ExtractQueryLimitsContext(ctx) - - if limits != nil { - err := InjectQueryLimitsHTTP(r, limits) - if err != nil { - return nil, err - } - } - - return t.next.RoundTrip(r) -} From 030ab857cd60b2ba2304acd9167e61e84c8c1be1 Mon Sep 17 00:00:00 2001 From: Steven Miller Date: Mon, 23 Oct 2023 11:18:42 -0400 Subject: [PATCH 16/33] Fix typo in Loki authentication docs (#11007) **What this PR does / why we need it**: Typo fix **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [ ] Reviewed the 
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. --- docs/sources/operations/authentication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/operations/authentication.md b/docs/sources/operations/authentication.md index 065cd207a5811..4235959b1f2c6 100644 --- a/docs/sources/operations/authentication.md +++ b/docs/sources/operations/authentication.md @@ -21,7 +21,7 @@ A list of open-source reverse proxies you can use: Note that when using Loki in multi-tenant mode, Loki requires the HTTP header `X-Scope-OrgID` to be set to a string identifying the tenant; the responsibility of populating this value should be handled by the authenticating reverse proxy. -For more inforamtion, read the [multi-tenancy]({{< relref "./multi-tenancy" >}}) documentation. +For more information, read the [multi-tenancy]({{< relref "./multi-tenancy" >}}) documentation. For information on authenticating Promtail, see the documentation for [how to configure Promtail]({{< relref "../send-data/promtail/configuration" >}}). From 0a7737e7c7eb81890b8f9a18af8020d6bc8a9808 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Mon, 23 Oct 2023 08:41:32 -0700 Subject: [PATCH 17/33] adds a merge-builder for building blocks from other blocks+storage (#10974) Some minor refactoring + a (simple) `MergeBuilder` struct to build bloom blocks from other blocks and storage, depending on what's already been indexed into the blocks. --- pkg/storage/bloom/v1/block.go | 8 ++ pkg/storage/bloom/v1/block_writer.go | 73 ------------ pkg/storage/bloom/v1/builder.go | 162 +++++++++++++++++++++++++-- pkg/storage/bloom/v1/dedupe.go | 21 ++-- pkg/storage/bloom/v1/dedupe_test.go | 2 +- pkg/storage/bloom/v1/index.go | 6 +- pkg/storage/bloom/v1/reader.go | 83 ++++++++++++++ 7 files changed, 258 insertions(+), 97 deletions(-) create mode 100644 pkg/storage/bloom/v1/reader.go diff --git a/pkg/storage/bloom/v1/block.go b/pkg/storage/bloom/v1/block.go index 0386604bfae5a..cf28fab615aae 100644 --- a/pkg/storage/bloom/v1/block.go +++ b/pkg/storage/bloom/v1/block.go @@ -21,6 +21,7 @@ type Block struct { reader BlockReader // should this be decoupled from the struct (accepted as method arg instead)? 
initialized bool + dataRange SeriesHeader } func NewBlock(reader BlockReader) *Block { @@ -41,6 +42,13 @@ func (b *Block) LoadHeaders() error { return errors.Wrap(err, "decoding index") } + // TODO(owen-d): better pattern + xs := make([]SeriesHeader, 0, len(b.index.pageHeaders)) + for _, h := range b.index.pageHeaders { + xs = append(xs, h.SeriesHeader) + } + b.dataRange = aggregateHeaders(xs) + blooms, err := b.reader.Blooms() if err != nil { return errors.Wrap(err, "getting blooms reader") diff --git a/pkg/storage/bloom/v1/block_writer.go b/pkg/storage/bloom/v1/block_writer.go index 30885d9b873e0..317d1e598414a 100644 --- a/pkg/storage/bloom/v1/block_writer.go +++ b/pkg/storage/bloom/v1/block_writer.go @@ -22,11 +22,6 @@ type BlockWriter interface { Size() (int, error) // byte size of accumualted index & blooms } -type BlockReader interface { - Index() (io.ReadSeeker, error) - Blooms() (io.ReadSeeker, error) -} - // in memory impl type MemoryBlockWriter struct { index, blooms *bytes.Buffer @@ -116,71 +111,3 @@ func (b *DirectoryBlockWriter) Size() (int, error) { } return size, nil } - -// In memory reader -type ByteReader struct { - index, blooms *bytes.Buffer -} - -func NewByteReader(index, blooms *bytes.Buffer) *ByteReader { - return &ByteReader{index: index, blooms: blooms} -} - -func (r *ByteReader) Index() (io.ReadSeeker, error) { - return bytes.NewReader(r.index.Bytes()), nil -} - -func (r *ByteReader) Blooms() (io.ReadSeeker, error) { - return bytes.NewReader(r.blooms.Bytes()), nil -} - -// File reader -type DirectoryBlockReader struct { - dir string - blooms, index *os.File - - initialized bool -} - -func NewDirectoryBlockReader(dir string) *DirectoryBlockReader { - return &DirectoryBlockReader{ - dir: dir, - initialized: false, - } -} - -func (r *DirectoryBlockReader) Init() error { - if !r.initialized { - var err error - r.index, err = os.Open(filepath.Join(r.dir, seriesFileName)) - if err != nil { - return errors.Wrap(err, "opening series file") - } - - r.blooms, err = os.Open(filepath.Join(r.dir, bloomFileName)) - if err != nil { - return errors.Wrap(err, "opening bloom file") - } - - r.initialized = true - } - return nil -} - -func (r *DirectoryBlockReader) Index() (io.ReadSeeker, error) { - if !r.initialized { - if err := r.Init(); err != nil { - return nil, err - } - } - return r.index, nil -} - -func (r *DirectoryBlockReader) Blooms() (io.ReadSeeker, error) { - if !r.initialized { - if err := r.Init(); err != nil { - return nil, err - } - } - return r.blooms, nil -} diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go index ce3d85c84c4dd..76ff08f3860e8 100644 --- a/pkg/storage/bloom/v1/builder.go +++ b/pkg/storage/bloom/v1/builder.go @@ -5,11 +5,13 @@ import ( "fmt" "hash" "io" + "sort" "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/grafana/loki/pkg/chunkenc" + "github.com/grafana/loki/pkg/storage/bloom/v1/filter" "github.com/grafana/loki/pkg/util/encoding" ) @@ -62,19 +64,10 @@ type SeriesWithBloom struct { func (b *BlockBuilder) BuildFrom(itr Iterator[SeriesWithBloom]) error { for itr.Next() { - series := itr.At() - - offset, err := b.blooms.Append(series) - if err != nil { - return errors.Wrapf(err, "writing bloom for series %v", series.Series.Fingerprint) + if err := b.AddSeries(itr.At()); err != nil { + return err } - if err := b.index.Append(SeriesWithOffset{ - Offset: offset, - Series: *series.Series, - }); err != nil { - return errors.Wrapf(err, "writing index for series 
%v", series.Series.Fingerprint) - } } if err := itr.Err(); err != nil { @@ -90,6 +83,22 @@ func (b *BlockBuilder) BuildFrom(itr Iterator[SeriesWithBloom]) error { return nil } +func (b *BlockBuilder) AddSeries(series SeriesWithBloom) error { + offset, err := b.blooms.Append(series) + if err != nil { + return errors.Wrapf(err, "writing bloom for series %v", series.Series.Fingerprint) + } + + if err := b.index.Append(SeriesWithOffset{ + Offset: offset, + Series: *series.Series, + }); err != nil { + return errors.Wrapf(err, "writing index for series %v", series.Series.Fingerprint) + } + + return nil +} + type BloomBlockBuilder struct { opts BlockOptions writer io.WriteCloser @@ -429,3 +438,134 @@ func (b *IndexBuilder) Close() error { } return errors.Wrap(b.writer.Close(), "closing series writer") } + +// SortBlocksIntoOverlappingGroups sorts a list of blocks into a sorted list of lists, +// where each list contains blocks that overlap with each other. +// TODO(owen-d): implement as an iterator so we don't have to load all blocks at once +// NB: unused now, but likely useful when we want to optimize compaction. I wrote this expecting to need it now +// but it feels unsavory to remove it +func SortBlocksIntoOverlappingGroups(xs []*Block) (groups [][]*Block) { + sort.Slice(xs, func(i, j int) bool { + a, b := xs[i].index, xs[j].index + return a.pageHeaders[0].FromFp <= b.pageHeaders[0].FromFp + }) + + var curGroup []*Block + for _, x := range xs { + switch { + case len(curGroup) == 0: + curGroup = append(curGroup, x) + case curGroup[len(curGroup)-1].dataRange.OverlapFingerprintRange(x.dataRange): + curGroup = append(curGroup, x) + default: + groups = append(groups, curGroup) + curGroup = []*Block{x} + } + } + + if len(curGroup) > 0 { + groups = append(groups, curGroup) + } + return groups +} + +// Simplistic implementation of a merge builder that builds a single block +// from a list of blocks and a store of series. +type MergeBuilder struct { + // existing blocks + blocks []*Block + // store + store Iterator[*Series] + // Add chunks to a bloom + populate func(*Series, *Bloom) error +} + +func NewMergeBuilder(blocks []*Block, store Iterator[*Series], populate func(*Series, *Bloom) error) *MergeBuilder { + return &MergeBuilder{ + blocks: blocks, + store: store, + populate: populate, + } +} + +// NB: this will build one block. Ideally we would build multiple blocks once a target size threshold is met +// but this gives us a good starting point. 
+func (mb *MergeBuilder) Build(builder *BlockBuilder) error { + var ( + xs = make([]PeekingIterator[*SeriesWithBloom], 0, len(mb.blocks)) + nextInBlocks *SeriesWithBloom + ) + + for _, block := range mb.blocks { + xs = append(xs, NewPeekingIter[*SeriesWithBloom](NewBlockQuerier(block))) + } + + // Turn the list of blocks into a single iterator that returns the next series + mergedBlocks := NewPeekingIter[*SeriesWithBloom](NewMergeBlockQuerier(xs...)) + // two overlapping blocks can conceivably have the same series, so we need to dedupe, + // preferring the one with the most chunks already indexed since we'll have + // to add fewer chunks to the bloom + deduped := NewDedupingIter[*SeriesWithBloom]( + func(a, b *SeriesWithBloom) bool { + return a.Series.Fingerprint == b.Series.Fingerprint + }, + func(a, b *SeriesWithBloom) *SeriesWithBloom { + if len(a.Series.Chunks) > len(b.Series.Chunks) { + return a + } + return b + }, + mergedBlocks, + ) + + for mb.store.Next() { + nextInStore := mb.store.At() + + // advance the merged blocks iterator until we find a series that is + // greater than or equal to the next series in the store. + // TODO(owen-d): expensive, but Seek is not implemented for this itr. + // It's also more efficient to build an iterator over the Series file in the index + // without the blooms until we find a bloom we actually need to unpack from the blooms file. + for nextInBlocks == nil || nextInBlocks.Series.Fingerprint < mb.store.At().Fingerprint { + if !deduped.Next() { + // we've exhausted all the blocks + nextInBlocks = nil + break + } + nextInBlocks = deduped.At() + } + + cur := nextInBlocks + chunksToAdd := nextInStore.Chunks + // The next series from the store doesn't exist in the blocks, so we add it + // in its entirety + if nextInBlocks == nil || nextInBlocks.Series.Fingerprint > nextInStore.Fingerprint { + cur = &SeriesWithBloom{ + Series: nextInStore, + Bloom: &Bloom{ + ScalableBloomFilter: *filter.NewScalableBloomFilter(1024, 0.01, 0.8), + }, + } + } else { + // if the series already exists in the block, we only need to add the new chunks + chunksToAdd = nextInStore.Chunks.Unless(nextInBlocks.Series.Chunks) + } + + if len(chunksToAdd) > 0 { + if err := mb.populate( + &Series{ + Fingerprint: nextInStore.Fingerprint, + Chunks: chunksToAdd, + }, + cur.Bloom, + ); err != nil { + return errors.Wrapf(err, "populating bloom for series with fingerprint: %v", nextInStore.Fingerprint) + } + } + + if err := builder.AddSeries(*cur); err != nil { + return errors.Wrap(err, "adding series to block") + } + } + return nil +} diff --git a/pkg/storage/bloom/v1/dedupe.go b/pkg/storage/bloom/v1/dedupe.go index 500f26489c640..759de7e686637 100644 --- a/pkg/storage/bloom/v1/dedupe.go +++ b/pkg/storage/bloom/v1/dedupe.go @@ -1,9 +1,9 @@ package v1 -// MergeDedupeIter is a deduplicating iterator that merges adjacent elements +// DedupeIter is a deduplicating iterator that merges adjacent elements // It's intended to be used when merging multiple blocks, // each of which may contain the same fingerprints -type MergeDedupeIter[T any] struct { +type DedupeIter[T any] struct { eq func(T, T) bool merge func(T, T) T itr PeekingIterator[T] @@ -11,19 +11,19 @@ type MergeDedupeIter[T any] struct { tmp []T } -func NewMergeDedupingIter[T any]( +func NewDedupingIter[T any]( eq func(T, T) bool, merge func(T, T) T, itr PeekingIterator[T], -) *MergeDedupeIter[T] { - return &MergeDedupeIter[T]{ +) *DedupeIter[T] { + return &DedupeIter[T]{ eq: eq, merge: merge, itr: itr, } } -func (it *MergeDedupeIter[T]) 
Next() bool { +func (it *DedupeIter[T]) Next() bool { it.tmp = it.tmp[:0] if !it.itr.Next() { return false @@ -40,17 +40,16 @@ func (it *MergeDedupeIter[T]) Next() bool { } // merge all the elements in tmp - for len(it.tmp) > 1 { - it.tmp[len(it.tmp)-2] = it.merge(it.tmp[len(it.tmp)-2], it.tmp[len(it.tmp)-1]) - it.tmp = it.tmp[:len(it.tmp)-1] + for i := len(it.tmp) - 1; i > 0; i-- { + it.tmp[i-1] = it.merge(it.tmp[i-1], it.tmp[i]) } return true } -func (it *MergeDedupeIter[T]) Err() error { +func (it *DedupeIter[T]) Err() error { return it.itr.Err() } -func (it *MergeDedupeIter[T]) At() T { +func (it *DedupeIter[T]) At() T { return it.tmp[0] } diff --git a/pkg/storage/bloom/v1/dedupe_test.go b/pkg/storage/bloom/v1/dedupe_test.go index b7aefc75db9ea..08e0ea2f85a19 100644 --- a/pkg/storage/bloom/v1/dedupe_test.go +++ b/pkg/storage/bloom/v1/dedupe_test.go @@ -26,7 +26,7 @@ func TestMergeDedupeIter(t *testing.T) { merge := func(a, _ *SeriesWithBloom) *SeriesWithBloom { return a } - deduper := NewMergeDedupingIter[*SeriesWithBloom]( + deduper := NewDedupingIter[*SeriesWithBloom]( eq, merge, NewPeekingIter[*SeriesWithBloom](mbq), diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go index f0a6257d53170..98e170b183e7c 100644 --- a/pkg/storage/bloom/v1/index.go +++ b/pkg/storage/bloom/v1/index.go @@ -198,6 +198,10 @@ type SeriesHeader struct { FromTs, ThroughTs model.Time } +func (h SeriesHeader) OverlapFingerprintRange(other SeriesHeader) bool { + return h.ThroughFp >= other.FromFp && h.FromFp <= other.ThroughFp +} + // build one aggregated header for the entire block func aggregateHeaders(xs []SeriesHeader) SeriesHeader { if len(xs) == 0 { @@ -333,7 +337,7 @@ func (d *SeriesPageDecoder) Err() error { type Series struct { Fingerprint model.Fingerprint - Chunks []ChunkRef + Chunks ChunkRefs } type SeriesWithOffset struct { diff --git a/pkg/storage/bloom/v1/reader.go b/pkg/storage/bloom/v1/reader.go new file mode 100644 index 0000000000000..e4de9609b9082 --- /dev/null +++ b/pkg/storage/bloom/v1/reader.go @@ -0,0 +1,83 @@ +package v1 + +import ( + "bytes" + "io" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +type BlockReader interface { + Index() (io.ReadSeeker, error) + Blooms() (io.ReadSeeker, error) +} + +// In memory reader +type ByteReader struct { + index, blooms *bytes.Buffer +} + +func NewByteReader(index, blooms *bytes.Buffer) *ByteReader { + return &ByteReader{index: index, blooms: blooms} +} + +func (r *ByteReader) Index() (io.ReadSeeker, error) { + return bytes.NewReader(r.index.Bytes()), nil +} + +func (r *ByteReader) Blooms() (io.ReadSeeker, error) { + return bytes.NewReader(r.blooms.Bytes()), nil +} + +// File reader +type DirectoryBlockReader struct { + dir string + blooms, index *os.File + + initialized bool +} + +func NewDirectoryBlockReader(dir string) *DirectoryBlockReader { + return &DirectoryBlockReader{ + dir: dir, + initialized: false, + } +} + +func (r *DirectoryBlockReader) Init() error { + if !r.initialized { + var err error + r.index, err = os.Open(filepath.Join(r.dir, seriesFileName)) + if err != nil { + return errors.Wrap(err, "opening series file") + } + + r.blooms, err = os.Open(filepath.Join(r.dir, bloomFileName)) + if err != nil { + return errors.Wrap(err, "opening bloom file") + } + + r.initialized = true + } + return nil +} + +func (r *DirectoryBlockReader) Index() (io.ReadSeeker, error) { + if !r.initialized { + if err := r.Init(); err != nil { + return nil, err + } + } + return r.index, nil +} + +func (r 
*DirectoryBlockReader) Blooms() (io.ReadSeeker, error) { + if !r.initialized { + if err := r.Init(); err != nil { + return nil, err + } + } + return r.blooms, nil +} From 60ea954f5df20978b9724ca6180d7986276c1caa Mon Sep 17 00:00:00 2001 From: Poyzan <31743851+poyzannur@users.noreply.github.com> Date: Tue, 24 Oct 2023 17:41:48 +0100 Subject: [PATCH 18/33] Fix ring start up errors (#11015) **What this PR does / why we need it**: The errors below were introduced while adding the initial service structure for the BloomGateway https://github.com/grafana/loki/pull/10782 and BloomCompactor https://github.com/grafana/loki/pull/10748 components. ``` docker-loki-read-3 | panic: runtime error: invalid memory address or nil pointer dereference docker-loki-read-3 | [signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x9e6d90] docker-loki-read-3 | docker-loki-read-3 | goroutine 1 [running]: docker-loki-read-3 | github.com/grafana/dskit/kv.createClient({_, _}, {_, _}, {{{0x22f6128, 0xe}, {{0x0, 0x0}}, 0x4a817c800, 0x0, ...}, ...}, ...) docker-loki-read-3 | /src/loki/vendor/github.com/grafana/dskit/kv/client.go:158 +0x360 docker-loki-read-3 | github.com/grafana/dskit/kv.NewClient({{0x22ec444, 0xa}, {0x22ee95d, 0xb}, {{{0x22f6128, 0xe}, {{...}}, 0x4a817c800, 0x0, 0x3ff0000000000000, ...}, ...}, ...}, ...) ``` This PR initializes the new components as part of MemberlistKV; see the sketch after the checklist for a minimal illustration of the failure mode. **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: Tested locally via the docker workflow **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory.
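For illustration, here is a minimal, self-contained Go sketch of the failure mode. It does not use dskit's real types: `kvConfig` and the string stand-in for `*memberlist.KV` are assumptions made for this sketch, and where dskit calls the getter unconditionally (hence the nil pointer dereference in the trace above), the sketch guards and reports an error so both cases can run.

```go
package main

import "fmt"

// kvConfig loosely mimics a ring's KV store config: for the "memberlist"
// store, building a client requires a getter for the shared memberlist KV.
// These names are illustrative, not dskit's actual API.
type kvConfig struct {
	Store        string
	MemberlistKV func() (string, error) // stand-in for func() (*memberlist.KV, error)
}

// createClient mimics the call site from the panic trace: the real code
// invokes cfg.MemberlistKV() directly, so a nil getter panics at startup.
func createClient(cfg kvConfig) (string, error) {
	if cfg.Store != "memberlist" {
		return "", fmt.Errorf("unknown store %q", cfg.Store)
	}
	if cfg.MemberlistKV == nil {
		return "", fmt.Errorf("memberlist KV getter was never wired up")
	}
	return cfg.MemberlistKV()
}

func main() {
	getter := func() (string, error) { return "memberlist-backed KV", nil }

	// The fix, in spirit: every ring config, including the new BloomGateway
	// and BloomCompactor rings, must receive the shared getter in
	// initMemberlistKV before its ring client is created.
	wired := kvConfig{Store: "memberlist", MemberlistKV: getter}
	notWired := kvConfig{Store: "memberlist"} // the pre-fix state

	for _, cfg := range []kvConfig{wired, notWired} {
		if kv, err := createClient(cfg); err != nil {
			fmt.Println("startup fails:", err)
		} else {
			fmt.Println("client backed by", kv)
		}
	}
}
```

The two added lines in the diff below are exactly that wiring for the two new rings.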
--- pkg/loki/modules.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index dd53f3f262195..bca82aaa5554a 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1155,7 +1155,8 @@ func (t *Loki) initMemberlistKV() (services.Service, error) { t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.QueryScheduler.SchedulerRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - + t.Cfg.BloomGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV + t.Cfg.BloomCompactor.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Server.HTTP.Handle("/memberlist", t.MemberlistKV) if t.Cfg.InternalServer.Enable { From 52a3f16039dd5ff655fc3681257d99794f620ec4 Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Wed, 25 Oct 2023 12:16:36 +0200 Subject: [PATCH 19/33] Flag categorize labels on streams response (#10419) We recently introduced support for ingesting and querying structured metadata in Loki. This adds a new dimension to Loki's labels since now we arguably have three categories of labels: _stream_, _structured metadata_, and _parsed_ labels. Depending on the origin of the labels, they should be used in LogQL expressions differently to achieve optimal performance. _stream_ labels should be added to stream matchers, _structured metadata_ labels should be used in a filter expression before any parsing expression, and _parsed_ labels should be placed after the parser expression extracting them. The Grafana UI has a hard time dealing with this same problem. Before https://github.com/grafana/grafana/pull/73955, the filtering functionality in Grafana was broken since it was not able to distinguish between _stream_ and _structured metadata_ labels. Also, as soon as a parser expression was added to the query, filters added by Grafana would be appended to the end of the query regardless of the label category. The PR above implements a workaround for this problem but needs a better API on Loki's end to mitigate all corner cases. Loki currently returns the following JSON for log queries: ```json ... { "stream": { "cluster": "us-central", "container": "query-frontend", "namespace": "loki", "level": "info", "traceID": "68810cf0c94bfcca" }, "values": [ [ "1693996529000222496", "1693996529000222496 aaaaaaaaa.....\n" ], ... }, { "stream": { "cluster": "us-central", "container": "query-frontend", "namespace": "loki", "level": "debug", "traceID": "a7116cj54c4bjz8s" }, "values": [ [ "1693996529000222497", "1693996529000222497 bbbbbbbbb.....\n" ], ... }, ... ``` As can be seen, there is no way we can distinguish the category of each label. This PR introduces a new flag `X-Loki-Response-Encoding-Flags: categorize-labels` that makes Loki return categorized labels as follows: ```json ... { "stream": { "cluster": "us-central", "container": "query-frontend", "namespace": "loki", }, "values": [ [ "1693996529000222496", "1693996529000222496 aaaaaaaaa.....\n", { "structuredMetadata": { "traceID": "68810cf0c94bfcca" }, "parsed": { "level": "info" } } ], [ "1693996529000222497", "1693996529000222497 bbbbbbbbb.....\n", { "structuredMetadata": { "traceID": "a7116cj54c4bjz8s" }, "parsed": { "level": "debug" } } ], ... }, ... ``` Note that this PR only supports log queries, not metric queries. From a UX perspective, being able to categorize labels in metric queries doesn't have any benefit yet. 
Having said that, supporting this for metric queries would require some minor refactoring on top of what has been implemented here. If we decide to do that, I think we should do it on a separate PR to avoid making this PR even larger. I also decided to leave out support for Tail queries for the same reason. Once this one gets merged, we can work to support tailing. --- **Note to reviewers** This PR is huge since we need to forward categorized labels all over the codebase (from parsing logs all the way to marshaling); fortunately, many of the changes come from updating tests and refactoring iterators. Tested out in a dev cell with the query `{stream="stdout"} | label_format new="text"`. - Without the new flag: ``` $ http http://127.0.0.1:3100/loki/api/v1/query_range\?direction\=BACKWARD\&end\=1693996529322486000\&limit\=30\&query\=%7Bstream%3D%22stdout%22%7D+%7C+label_format+new%3D%22text%22\&start\=1693992929322486000 X-Scope-Orgid:REDACTED { "data": { "result": [ { "stream": { "new": "text", "pod": "loki-canary-986bd6f4b-xqmb7", "stream": "stdout" }, "values": [ [ "1693996529000222496", "1693996529000222496 pppppppppppp...\n" ], [ "1693996528499160852", "1693996528499160852 pppppppppppp...\n" ], ... ``` - With the new flag: ``` $ http http://127.0.0.1:3100/loki/api/v1/query_range\?direction\=BACKWARD\&end\=1693996529322486000\&limit\=30\&query\=%7Bstream%3D%22stdout%22%7D+%7C+label_format+new%3D%22text%22\&start\=1693992929322486000 X-Scope-Orgid:REDACTED X-Loki-Response-Encoding-Flags:categorize-labels { "data": { "encodingFlags": [ "categorize-labels" ], "result": [ { "stream": { "pod": "loki-canary-986bd6f4b-xqmb7", "stream": "stdout" }, "values": [ [ "1693996529000222496", "1693996529000222496 pppppppppppp...\n", { "parsed": { "new": "text" } } ], [ "1693996528499160852", "1693996528499160852 pppppppppppp...\n", { "parsed": { "new": "text" } } ], ...
``` --- go.mod | 2 +- integration/client/client.go | 69 +- .../loki_micro_services_delete_test.go | 2 +- integration/loki_micro_services_test.go | 293 ++++++++ pkg/chunkenc/dumb_chunk.go | 2 +- pkg/chunkenc/interface.go | 4 +- pkg/chunkenc/memchunk.go | 36 +- pkg/chunkenc/memchunk_test.go | 129 ++-- pkg/chunkenc/unordered.go | 26 +- pkg/chunkenc/unordered_test.go | 21 +- pkg/compactor/retention/retention_test.go | 3 +- pkg/iter/entry_iterator.go | 12 - pkg/loghttp/entry.go | 70 +- pkg/loghttp/query.go | 5 +- pkg/loghttp/query_test.go | 17 +- pkg/logql/engine.go | 46 +- pkg/logql/engine_test.go | 4 +- pkg/logql/log/fmt.go | 6 +- pkg/logql/log/ip.go | 3 +- pkg/logql/log/labels.go | 315 ++++++-- pkg/logql/log/labels_test.go | 86 ++- pkg/logql/log/metrics_extraction.go | 4 +- pkg/logql/log/parser.go | 24 +- pkg/logql/log/parser_test.go | 3 +- pkg/logql/log/pipeline.go | 4 +- pkg/logql/log/pipeline_test.go | 95 ++- pkg/loki/modules.go | 5 +- pkg/push/push.pb.go | 155 +++- pkg/push/push.proto | 7 + pkg/push/types.go | 98 ++- pkg/push/types_test.go | 16 +- pkg/querier/http.go | 2 +- pkg/querier/queryrange/codec.go | 28 +- pkg/querier/queryrange/codec_test.go | 237 +++++- pkg/querier/queryrange/serialize.go | 4 +- pkg/storage/lazy_chunk_test.go | 2 +- pkg/util/httpreq/encoding_flags.go | 113 +++ pkg/util/marshal/labels.go | 2 +- pkg/util/marshal/legacy/marshal_test.go | 12 +- pkg/util/marshal/marshal.go | 8 +- pkg/util/marshal/marshal_test.go | 676 +++++++++--------- pkg/util/marshal/query.go | 95 ++- .../grafana/loki/pkg/push/push.pb.go | 155 +++- .../grafana/loki/pkg/push/push.proto | 7 + .../github.com/grafana/loki/pkg/push/types.go | 98 ++- vendor/modules.txt | 2 +- 46 files changed, 2190 insertions(+), 813 deletions(-) create mode 100644 pkg/util/httpreq/encoding_flags.go diff --git a/go.mod b/go.mod index a7415b9f2262c..77228a1eaa12d 100644 --- a/go.mod +++ b/go.mod @@ -123,7 +123,7 @@ require ( github.com/efficientgo/core v1.0.0-rc.2 github.com/fsnotify/fsnotify v1.6.0 github.com/gogo/googleapis v1.4.0 - github.com/grafana/loki/pkg/push v0.0.0-20231017172654-cfc4f0e84adc + github.com/grafana/loki/pkg/push v0.0.0-20231023154132-0a7737e7c7eb github.com/heroku/x v0.0.61 github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0 diff --git a/integration/client/client.go b/integration/client/client.go index 8e13ed8ef1364..12b5cd11277c5 100644 --- a/integration/client/client.go +++ b/integration/client/client.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/buger/jsonparser" "github.com/grafana/dskit/user" "github.com/prometheus/prometheus/model/labels" "go.opentelemetry.io/collector/pdata/pcommon" @@ -335,10 +336,40 @@ func (c *Client) GetDeleteRequests() (DeleteRequests, error) { return deleteReqs, nil } +type Entry []string + +func (e *Entry) UnmarshalJSON(data []byte) error { + if *e == nil { + *e = make([]string, 0, 3) + } + + var parseError error + _, err := jsonparser.ArrayEach(data, func(value []byte, t jsonparser.ValueType, _ int, _ error) { + // The TS and the lines are strings. The labels are a JSON object. + // but we will parse them as strings. 
+ if t != jsonparser.String && t != jsonparser.Object { + parseError = jsonparser.MalformedStringError + return + } + + v, err := jsonparser.ParseString(value) + if err != nil { + parseError = err + return + } + *e = append(*e, v) + }) + + if parseError != nil { + return parseError + } + return err +} + // StreamValues holds a label key value pairs for the Stream and a list of a list of values type StreamValues struct { Stream map[string]string - Values [][]string + Values []Entry } // MatrixValues holds a label key value pairs for the metric and a list of a list of values @@ -377,17 +408,19 @@ func (a *VectorValues) UnmarshalJSON(b []byte) error { // DataType holds the result type and a list of StreamValues type DataType struct { - ResultType string - Stream []StreamValues - Matrix []MatrixValues - Vector []VectorValues + ResultType string + Stream []StreamValues + Matrix []MatrixValues + Vector []VectorValues + EncodingFlags []string } func (a *DataType) UnmarshalJSON(b []byte) error { // get the result type var s struct { - ResultType string `json:"resultType"` - Result json.RawMessage `json:"result"` + ResultType string `json:"resultType"` + EncodingFlags []string `json:"encodingFlags"` + Result json.RawMessage `json:"result"` } if err := json.Unmarshal(b, &s); err != nil { return err @@ -410,6 +443,7 @@ func (a *DataType) UnmarshalJSON(b []byte) error { return fmt.Errorf("unknown result type %s", s.ResultType) } a.ResultType = s.ResultType + a.EncodingFlags = s.EncodingFlags return nil } @@ -434,12 +468,16 @@ type Rules struct { Rules []interface{} } +type Header struct { + Name, Value string +} + // RunRangeQuery runs a query and returns an error if anything went wrong -func (c *Client) RunRangeQuery(ctx context.Context, query string) (*Response, error) { +func (c *Client) RunRangeQuery(ctx context.Context, query string, extraHeaders ...Header) (*Response, error) { ctx, cancelFunc := context.WithTimeout(ctx, requestTimeout) defer cancelFunc() - buf, statusCode, err := c.run(ctx, c.rangeQueryURL(query)) + buf, statusCode, err := c.run(ctx, c.rangeQueryURL(query), extraHeaders...) if err != nil { return nil, err } @@ -448,7 +486,7 @@ func (c *Client) RunRangeQuery(ctx context.Context, query string) (*Response, er } // RunQuery runs a query and returns an error if anything went wrong -func (c *Client) RunQuery(ctx context.Context, query string) (*Response, error) { +func (c *Client) RunQuery(ctx context.Context, query string, extraHeaders ...Header) (*Response, error) { ctx, cancelFunc := context.WithTimeout(ctx, requestTimeout) defer cancelFunc() @@ -463,7 +501,7 @@ func (c *Client) RunQuery(ctx context.Context, query string) (*Response, error) u.Path = "/loki/api/v1/query" u.RawQuery = v.Encode() - buf, statusCode, err := c.run(ctx, u.String()) + buf, statusCode, err := c.run(ctx, u.String(), extraHeaders...) 
if err != nil { return nil, err } @@ -617,18 +655,21 @@ func (c *Client) Series(ctx context.Context, matcher string) ([]map[string]strin return values.Data, nil } -func (c *Client) request(ctx context.Context, method string, url string) (*http.Request, error) { +func (c *Client) request(ctx context.Context, method string, url string, extraHeaders ...Header) (*http.Request, error) { ctx = user.InjectOrgID(ctx, c.instanceID) req, err := http.NewRequestWithContext(ctx, method, url, nil) if err != nil { return nil, err } req.Header.Set("X-Scope-OrgID", c.instanceID) + for _, h := range extraHeaders { + req.Header.Add(h.Name, h.Value) + } return req, nil } -func (c *Client) run(ctx context.Context, u string) ([]byte, int, error) { - req, err := c.request(ctx, "GET", u) +func (c *Client) run(ctx context.Context, u string, extraHeaders ...Header) ([]byte, int, error) { + req, err := c.request(ctx, "GET", u, extraHeaders...) if err != nil { return nil, 0, err } diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go index e1759783967fc..5cce134d94b2c 100644 --- a/integration/loki_micro_services_delete_test.go +++ b/integration/loki_micro_services_delete_test.go @@ -408,7 +408,7 @@ func getMetricValue(t *testing.T, metricName, metrics string) float64 { } func pushRequestToClientStreamValues(t *testing.T, p pushRequest) []client.StreamValues { - logsByStream := map[string][][]string{} + logsByStream := map[string][]client.Entry{} for _, entry := range p.entries { lb := labels.NewBuilder(labels.FromMap(p.stream)) for _, l := range entry.StructuredMetadata { diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go index d2a285d154572..45942076569c3 100644 --- a/integration/loki_micro_services_test.go +++ b/integration/loki_micro_services_test.go @@ -2,12 +2,14 @@ package integration import ( "context" + "encoding/json" "strings" "testing" "time" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -16,6 +18,7 @@ import ( "github.com/grafana/loki/integration/cluster" "github.com/grafana/loki/pkg/storage" + "github.com/grafana/loki/pkg/util/httpreq" "github.com/grafana/loki/pkg/util/querylimits" ) @@ -839,6 +842,296 @@ func TestOTLPLogsIngestQuery(t *testing.T) { }) } +func TestCategorizedLabels(t *testing.T) { + clu := cluster.New(nil, cluster.SchemaWithTSDB, func(c *cluster.Cluster) { + c.SetSchemaVer("v13") + }) + + defer func() { + assert.NoError(t, clu.Cleanup()) + }() + + var ( + tDistributor = clu.AddComponent( + "distributor", + "-target=distributor", + ) + tIndexGateway = clu.AddComponent( + "index-gateway", + "-target=index-gateway", + "-tsdb.enable-postings-cache=true", + "-store.index-cache-read.embedded-cache.enabled=true", + ) + ) + require.NoError(t, clu.Run()) + + var ( + tIngester = clu.AddComponent( + "ingester", + "-target=ingester", + "-ingester.flush-on-shutdown=true", + "-ingester.wal-enabled=false", + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + ) + tQueryScheduler = clu.AddComponent( + "query-scheduler", + "-target=query-scheduler", + "-query-scheduler.use-scheduler-ring=false", + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + ) + tCompactor = clu.AddComponent( + 
"compactor", + "-target=compactor", + "-boltdb.shipper.compactor.compaction-interval=1s", + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + ) + ) + require.NoError(t, clu.Run()) + + // finally, run the query-frontend and querier. + var ( + tQueryFrontend = clu.AddComponent( + "query-frontend", + "-target=query-frontend", + "-frontend.scheduler-address="+tQueryScheduler.GRPCURL(), + "-frontend.default-validity=0s", + "-common.compactor-address="+tCompactor.HTTPURL(), + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + ) + _ = clu.AddComponent( + "querier", + "-target=querier", + "-querier.scheduler-address="+tQueryScheduler.GRPCURL(), + "-common.compactor-address="+tCompactor.HTTPURL(), + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + ) + ) + require.NoError(t, clu.Run()) + + tenantID := randStringRunes() + + now := time.Now() + cliDistributor := client.New(tenantID, "", tDistributor.HTTPURL()) + cliDistributor.Now = now + cliIngester := client.New(tenantID, "", tIngester.HTTPURL()) + cliIngester.Now = now + cliQueryFrontend := client.New(tenantID, "", tQueryFrontend.HTTPURL()) + cliQueryFrontend.Now = now + cliIndexGateway := client.New(tenantID, "", tIndexGateway.HTTPURL()) + cliIndexGateway.Now = now + + now = time.Now() + require.NoError(t, cliDistributor.PushLogLine("lineA", now.Add(-1*time.Second), nil, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineB", now.Add(-2*time.Second), map[string]string{"traceID": "123", "user": "a"}, map[string]string{"job": "fake"})) + require.NoError(t, tIngester.Restart()) + require.NoError(t, cliDistributor.PushLogLine("lineC msg=foo", now.Add(-3*time.Second), map[string]string{"traceID": "456", "user": "b"}, map[string]string{"job": "fake"})) + require.NoError(t, cliDistributor.PushLogLine("lineD msg=foo text=bar", now.Add(-4*time.Second), map[string]string{"traceID": "789", "user": "c"}, map[string]string{"job": "fake"})) + + type expectedStream struct { + Stream map[string]string + Lines []string + CategorizedLabels []map[string]map[string]string + } + + for _, tc := range []struct { + name string + query string + encodingFlags []string + expectedStreams []expectedStream + }{ + { + name: "no header - no parser ", + query: `{job="fake"}`, + expectedStreams: []expectedStream{ + { + Stream: labels.FromStrings("job", "fake").Map(), + Lines: []string{"lineA"}, + }, + { + Stream: map[string]string{ + "job": "fake", + "traceID": "123", + "user": "a", + }, + Lines: []string{"lineB"}, + }, + { + Stream: map[string]string{ + "job": "fake", + "traceID": "456", + "user": "b", + }, + Lines: []string{"lineC msg=foo"}, + }, + { + Stream: map[string]string{ + "job": "fake", + "traceID": "789", + "user": "c", + }, + Lines: []string{"lineD msg=foo text=bar"}, + }, + }, + }, + { + name: "no header - with parser", + query: `{job="fake"} | logfmt`, + expectedStreams: []expectedStream{ + { + Stream: map[string]string{ + "job": "fake", + }, + Lines: []string{"lineA"}, + }, + { + Stream: map[string]string{ + "job": "fake", + "traceID": "123", + "user": "a", + }, + Lines: []string{"lineB"}, + }, + { + Stream: map[string]string{ + "job": "fake", + "traceID": "456", + "user": "b", + "msg": "foo", + }, + Lines: []string{"lineC msg=foo"}, + }, + { + Stream: map[string]string{ + "job": "fake", + "traceID": "789", + "user": "c", + "msg": "foo", + "text": "bar", + }, + Lines: []string{"lineD msg=foo text=bar"}, + }, + }, + }, + { + name: "with 
header - no parser ", + query: `{job="fake"}`, + encodingFlags: []string{ + string(httpreq.FlagCategorizeLabels), + }, + expectedStreams: []expectedStream{ + { + Stream: map[string]string{ + "job": "fake", + }, + Lines: []string{"lineA", "lineB", "lineC msg=foo", "lineD msg=foo text=bar"}, + CategorizedLabels: []map[string]map[string]string{ + { + "structuredMetadata": { + "traceID": "123", + "user": "a", + }, + }, + { + "structuredMetadata": { + "traceID": "456", + "user": "b", + }, + }, + { + "structuredMetadata": { + "traceID": "789", + "user": "c", + }, + }, + }, + }, + }, + }, + { + name: "with header - with parser", + query: `{job="fake"} | logfmt`, + encodingFlags: []string{ + string(httpreq.FlagCategorizeLabels), + }, + expectedStreams: []expectedStream{ + { + Stream: map[string]string{ + "job": "fake", + }, + Lines: []string{"lineA", "lineB", "lineC msg=foo", "lineD msg=foo text=bar"}, + CategorizedLabels: []map[string]map[string]string{ + { + "structuredMetadata": { + "traceID": "123", + "user": "a", + }, + }, + { + "structuredMetadata": { + "traceID": "456", + "user": "b", + }, + "parsed": { + "msg": "foo", + }, + }, + { + "structuredMetadata": { + "traceID": "789", + "user": "c", + }, + "parsed": { + "msg": "foo", + "text": "bar", + }, + }, + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + // Add header with encoding flags and expect them to be returned in the response. + var headers []client.Header + var expectedEncodingFlags []string + if len(tc.encodingFlags) > 0 { + headers = append(headers, client.Header{Name: httpreq.LokiEncodingFlagsHeader, Value: strings.Join(tc.encodingFlags, httpreq.EncodeFlagsDelimiter)}) + expectedEncodingFlags = tc.encodingFlags + } + + resp, err := cliQueryFrontend.RunQuery(context.Background(), tc.query, headers...) + require.NoError(t, err) + assert.Equal(t, "streams", resp.Data.ResultType) + + var streams []expectedStream + for _, stream := range resp.Data.Stream { + var lines []string + var categorizedLabels []map[string]map[string]string + + for _, val := range stream.Values { + lines = append(lines, val[1]) + + var catLabels map[string]map[string]string + if len(val) >= 3 && val[2] != "" { + err = json.Unmarshal([]byte(val[2]), &catLabels) + require.NoError(t, err) + categorizedLabels = append(categorizedLabels, catLabels) + } + } + + streams = append(streams, expectedStream{ + Stream: stream.Stream, + Lines: lines, + CategorizedLabels: categorizedLabels, + }) + } + + assert.ElementsMatch(t, tc.expectedStreams, streams) + assert.ElementsMatch(t, expectedEncodingFlags, resp.Data.EncodingFlags) + }) + } +} + func getValueFromMF(mf *dto.MetricFamily, lbs []*dto.LabelPair) float64 { for _, m := range mf.Metric { if !assert.ObjectsAreEqualValues(lbs, m.GetLabel()) { diff --git a/pkg/chunkenc/dumb_chunk.go b/pkg/chunkenc/dumb_chunk.go index 2aa7dee95591e..793e0b751829b 100644 --- a/pkg/chunkenc/dumb_chunk.go +++ b/pkg/chunkenc/dumb_chunk.go @@ -72,7 +72,7 @@ func (c *dumbChunk) Encoding() Encoding { return EncNone } // Returns an iterator that goes from _most_ recent to _least_ recent (ie, // backwards). 
-func (c *dumbChunk) Iterator(_ context.Context, from, through time.Time, direction logproto.Direction, _ log.StreamPipeline, _ ...iter.EntryIteratorOption) (iter.EntryIterator, error) { +func (c *dumbChunk) Iterator(_ context.Context, from, through time.Time, direction logproto.Direction, _ log.StreamPipeline) (iter.EntryIterator, error) { i := sort.Search(len(c.entries), func(i int) bool { return !from.After(c.entries[i].Timestamp) }) diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go index 9a066402bed8f..0985f4a883c22 100644 --- a/pkg/chunkenc/interface.go +++ b/pkg/chunkenc/interface.go @@ -129,7 +129,7 @@ type Chunk interface { Bounds() (time.Time, time.Time) SpaceFor(*logproto.Entry) bool Append(*logproto.Entry) error - Iterator(ctx context.Context, mintT, maxtT time.Time, direction logproto.Direction, pipeline log.StreamPipeline, options ...iter.EntryIteratorOption) (iter.EntryIterator, error) + Iterator(ctx context.Context, mintT, maxtT time.Time, direction logproto.Direction, pipeline log.StreamPipeline) (iter.EntryIterator, error) SampleIterator(ctx context.Context, from, through time.Time, extractor log.StreamSampleExtractor) iter.SampleIterator // Returns the list of blocks in the chunks. Blocks(mintT, maxtT time.Time) []Block @@ -158,7 +158,7 @@ type Block interface { // Entries is the amount of entries in the block. Entries() int // Iterator returns an entry iterator for the block. - Iterator(ctx context.Context, pipeline log.StreamPipeline, options ...iter.EntryIteratorOption) iter.EntryIterator + Iterator(ctx context.Context, pipeline log.StreamPipeline) iter.EntryIterator // SampleIterator returns a sample iterator for the block. SampleIterator(ctx context.Context, extractor log.StreamSampleExtractor) iter.SampleIterator } diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go index 0fc52db7f3641..f0a5127480730 100644 --- a/pkg/chunkenc/memchunk.go +++ b/pkg/chunkenc/memchunk.go @@ -950,7 +950,7 @@ func (c *MemChunk) Bounds() (fromT, toT time.Time) { } // Iterator implements Chunk. -func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, direction logproto.Direction, pipeline log.StreamPipeline, options ...iter.EntryIteratorOption) (iter.EntryIterator, error) { +func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, direction logproto.Direction, pipeline log.StreamPipeline) (iter.EntryIterator, error) { mint, maxt := mintT.UnixNano(), maxtT.UnixNano() blockItrs := make([]iter.EntryIterator, 0, len(c.blocks)+1) @@ -977,7 +977,7 @@ func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, directi } lastMax = b.maxt - blockItrs = append(blockItrs, encBlock{c.encoding, c.format, c.symbolizer, b}.Iterator(ctx, pipeline, options...)) + blockItrs = append(blockItrs, encBlock{c.encoding, c.format, c.symbolizer, b}.Iterator(ctx, pipeline)) } if !c.head.IsEmpty() { @@ -985,7 +985,7 @@ func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, directi if from < lastMax { ordered = false } - headIterator = c.head.Iterator(ctx, direction, mint, maxt, pipeline, options...) 
+ headIterator = c.head.Iterator(ctx, direction, mint, maxt, pipeline) } if direction == logproto.FORWARD { @@ -1100,7 +1100,7 @@ func (c *MemChunk) Blocks(mintT, maxtT time.Time) []Block { // Rebound builds a smaller chunk with logs having timestamp from start and end(both inclusive) func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, error) { // add a millisecond to end time because the Chunk.Iterator considers end time to be non-inclusive. - itr, err := c.Iterator(context.Background(), start, end.Add(time.Millisecond), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}), iter.WithKeepStructuredMetadata()) + itr, err := c.Iterator(context.Background(), start, end.Add(time.Millisecond), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{})) if err != nil { return nil, err } @@ -1149,11 +1149,11 @@ type encBlock struct { block } -func (b encBlock) Iterator(ctx context.Context, pipeline log.StreamPipeline, options ...iter.EntryIteratorOption) iter.EntryIterator { +func (b encBlock) Iterator(ctx context.Context, pipeline log.StreamPipeline) iter.EntryIterator { if len(b.b) == 0 { return iter.NoopIterator } - return newEntryIterator(ctx, GetReaderPool(b.enc), b.b, pipeline, b.format, b.symbolizer, options...) + return newEntryIterator(ctx, GetReaderPool(b.enc), b.b, pipeline, b.format, b.symbolizer) } func (b encBlock) SampleIterator(ctx context.Context, extractor log.StreamSampleExtractor) iter.SampleIterator { @@ -1179,7 +1179,7 @@ func (b block) MaxTime() int64 { return b.maxt } -func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction, mint, maxt int64, pipeline log.StreamPipeline, _ ...iter.EntryIteratorOption) iter.EntryIterator { +func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction, mint, maxt int64, pipeline log.StreamPipeline) iter.EntryIterator { if hb.IsEmpty() || (maxt < hb.mint || hb.maxt < mint) { return iter.NoopIterator } @@ -1205,7 +1205,7 @@ func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction, } stats.AddPostFilterLines(1) var stream *logproto.Stream - labels := parsedLbs.Labels().String() + labels := parsedLbs.String() var ok bool if stream, ok = streams[labels]; !ok { stream = &logproto.Stream{ @@ -1582,23 +1582,16 @@ func (si *bufferedIterator) close() { si.origBytes = nil } -func newEntryIterator(ctx context.Context, pool ReaderPool, b []byte, pipeline log.StreamPipeline, format byte, symbolizer *symbolizer, options ...iter.EntryIteratorOption) iter.EntryIterator { - entryIter := &entryBufferedIterator{ +func newEntryIterator(ctx context.Context, pool ReaderPool, b []byte, pipeline log.StreamPipeline, format byte, symbolizer *symbolizer) iter.EntryIterator { + return &entryBufferedIterator{ bufferedIterator: newBufferedIterator(ctx, pool, b, format, symbolizer), pipeline: pipeline, } - - for _, opt := range options { - opt(&entryIter.iterOptions) - } - - return entryIter } type entryBufferedIterator struct { *bufferedIterator - pipeline log.StreamPipeline - iterOptions iter.EntryIteratorOptions + pipeline log.StreamPipeline cur logproto.Entry currLabels log.LabelsResult @@ -1623,12 +1616,9 @@ func (e *entryBufferedIterator) Next() bool { e.currLabels = lbs e.cur.Timestamp = time.Unix(0, e.currTs) e.cur.Line = string(newLine) + e.cur.StructuredMetadata = logproto.FromLabelsToLabelAdapters(lbs.StructuredMetadata()) + e.cur.Parsed = logproto.FromLabelsToLabelAdapters(lbs.Parsed()) - // Most of the time, there is no need to send back the labels 
of structured metadata, as they are already part of the labels results. - // Still it might be needed for example when appending entries from one chunk into another one. - if e.iterOptions.KeepStructuredMetdata { - e.cur.StructuredMetadata = logproto.FromLabelsToLabelAdapters(e.currStructuredMetadata) - } return true } return false diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go index 22116d751f82e..593b3d7de224f 100644 --- a/pkg/chunkenc/memchunk_test.go +++ b/pkg/chunkenc/memchunk_test.go @@ -193,10 +193,14 @@ func TestBlock(t *testing.T) { e := it.Entry() require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) require.Equal(t, cases[idx].str, e.Line) - require.Empty(t, e.StructuredMetadata) if chunkFormat < ChunkFormatV4 { require.Equal(t, labels.EmptyLabels().String(), it.Labels()) + require.Empty(t, e.StructuredMetadata) } else { + if len(cases[idx].lbs) > 0 { + require.Equal(t, push.LabelsAdapter(cases[idx].lbs), e.StructuredMetadata) + } + expectedLabels := logproto.FromLabelAdaptersToLabels(cases[idx].lbs).String() require.Equal(t, expectedLabels, it.Labels()) } @@ -452,11 +456,12 @@ func TestSerialization(t *testing.T) { e := it.Entry() require.Equal(t, int64(i), e.Timestamp.UnixNano()) require.Equal(t, strconv.Itoa(i), e.Line) - require.Nil(t, e.StructuredMetadata) if appendWithStructuredMetadata && testData.chunkFormat >= ChunkFormatV4 { require.Equal(t, labels.FromStrings("foo", strconv.Itoa(i)).String(), it.Labels()) + require.Equal(t, labels.FromStrings("foo", strconv.Itoa(i)), logproto.FromLabelAdaptersToLabels(e.StructuredMetadata)) } else { require.Equal(t, labels.EmptyLabels().String(), it.Labels()) + require.Nil(t, e.StructuredMetadata) } } require.NoError(t, it.Error()) @@ -1735,10 +1740,11 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { expectedBytes := lineBytes + expectedStructuredMetadataBytes for _, tc := range []struct { - name string - query string - expectedLines []string - expectedStreams []string + name string + query string + expectedLines []string + expectedStreams []string + expectedStructuredMetadata [][]logproto.LabelAdapter }{ { name: "no-filter", @@ -1750,6 +1756,12 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { labels.FromStrings("job", "fake", "traceID", "789", "user", "c").String(), labels.FromStrings("job", "fake", "traceID", "123", "user", "d").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123", "user", "a")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "456", "user", "b")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "789", "user", "c")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123", "user", "d")), + }, }, { name: "filter", @@ -1758,6 +1770,9 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { expectedStreams: []string{ labels.FromStrings("job", "fake", "traceID", "789", "user", "c").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "789", "user", "c")), + }, }, { name: "filter-regex-or", @@ -1767,6 +1782,10 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { labels.FromStrings("job", "fake", "traceID", "456", "user", "b").String(), labels.FromStrings("job", "fake", "traceID", "789", "user", "c").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + 
logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "456", "user", "b")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "789", "user", "c")), + }, }, { name: "filter-regex-contains", @@ -1775,6 +1794,9 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { expectedStreams: []string{ labels.FromStrings("job", "fake", "traceID", "456", "user", "b").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "456", "user", "b")), + }, }, { name: "filter-regex-complex", @@ -1784,6 +1806,10 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { labels.FromStrings("job", "fake", "traceID", "123", "user", "a").String(), labels.FromStrings("job", "fake", "traceID", "123", "user", "d").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123", "user", "a")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123", "user", "d")), + }, }, { name: "multiple-filters", @@ -1792,6 +1818,9 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { expectedStreams: []string{ labels.FromStrings("job", "fake", "traceID", "123", "user", "d").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "123", "user", "d")), + }, }, { name: "keep", @@ -1803,6 +1832,12 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { labels.FromStrings("job", "fake", "user", "c").String(), labels.FromStrings("job", "fake", "user", "d").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "a")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "b")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "c")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "d")), + }, }, { name: "keep-filter", @@ -1814,6 +1849,9 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { labels.FromStrings("job", "fake").String(), labels.FromStrings("job", "fake").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "b")), + }, }, { name: "drop", @@ -1825,6 +1863,12 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { labels.FromStrings("job", "fake", "user", "c").String(), labels.FromStrings("job", "fake", "user", "d").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "a")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "b")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "c")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "d")), + }, }, { name: "drop-filter", @@ -1836,6 +1880,12 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { labels.FromStrings("job", "fake", "traceID", "789", "user", "c").String(), labels.FromStrings("job", "fake", "user", "d").String(), }, + expectedStructuredMetadata: [][]logproto.LabelAdapter{ + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "a")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "456", "user", "b")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "789", "user", "c")), + logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "d")), + 
}, }, } { t.Run(tc.name, func(t *testing.T) { @@ -1855,18 +1905,21 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { var lines []string var streams []string + var structuredMetadata [][]logproto.LabelAdapter for it.Next() { require.NoError(t, it.Error()) e := it.Entry() lines = append(lines, e.Line) streams = append(streams, it.Labels()) - // We don't want to send back the structured metadata since - // they are already part of the returned labels. - require.Empty(t, e.StructuredMetadata) + if len(e.StructuredMetadata) > 0 { + structuredMetadata = append(structuredMetadata, e.StructuredMetadata) + } + require.Empty(t, e.Parsed) } assert.ElementsMatch(t, tc.expectedLines, lines) assert.ElementsMatch(t, tc.expectedStreams, streams) + assert.ElementsMatch(t, tc.expectedStructuredMetadata, structuredMetadata) resultStats := sts.Result(0, 0, len(lines)) require.Equal(t, int64(expectedBytes), resultStats.Summary.TotalBytesProcessed) @@ -1909,61 +1962,3 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { }) } } - -func TestMemChunk_IteratorOptions(t *testing.T) { - chk := newMemChunkWithFormat(ChunkFormatV4, EncNone, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize) - require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(0, "0", logproto.FromLabelsToLabelAdapters( - labels.FromStrings("a", "0"), - )))) - require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(1, "1", logproto.FromLabelsToLabelAdapters( - labels.FromStrings("a", "1"), - )))) - require.NoError(t, chk.cut()) - require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(2, "2", logproto.FromLabelsToLabelAdapters( - labels.FromStrings("a", "2"), - )))) - require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(3, "3", logproto.FromLabelsToLabelAdapters( - labels.FromStrings("a", "3"), - )))) - - for _, tc := range []struct { - name string - options []iter.EntryIteratorOption - expectStructuredMetadata bool - }{ - { - name: "No options", - expectStructuredMetadata: false, - }, - { - name: "WithKeepStructuredMetadata", - options: []iter.EntryIteratorOption{ - iter.WithKeepStructuredMetadata(), - }, - - expectStructuredMetadata: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - it, err := chk.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, noopStreamPipeline, tc.options...) 
- require.NoError(t, err) - - var idx int64 - for it.Next() { - expectedLabels := labels.FromStrings("a", fmt.Sprintf("%d", idx)) - expectedEntry := logproto.Entry{ - Timestamp: time.Unix(0, idx), - Line: fmt.Sprintf("%d", idx), - } - - if tc.expectStructuredMetadata { - expectedEntry.StructuredMetadata = logproto.FromLabelsToLabelAdapters(expectedLabels) - } - - require.Equal(t, expectedEntry, it.Entry()) - require.Equal(t, expectedLabels.String(), it.Labels()) - idx++ - } - }) - } -} diff --git a/pkg/chunkenc/unordered.go b/pkg/chunkenc/unordered.go index 0cd1d1bb133fd..cd7ea5dec122a 100644 --- a/pkg/chunkenc/unordered.go +++ b/pkg/chunkenc/unordered.go @@ -41,7 +41,6 @@ type HeadBlock interface { mint, maxt int64, pipeline log.StreamPipeline, - options ...iter.EntryIteratorOption, ) iter.EntryIterator SampleIterator( ctx context.Context, @@ -244,12 +243,7 @@ func (hb *unorderedHeadBlock) forEntries( return nil } -func (hb *unorderedHeadBlock) Iterator(ctx context.Context, direction logproto.Direction, mint, maxt int64, pipeline log.StreamPipeline, options ...iter.EntryIteratorOption) iter.EntryIterator { - var iterOptions iter.EntryIteratorOptions - for _, option := range options { - option(&iterOptions) - } - +func (hb *unorderedHeadBlock) Iterator(ctx context.Context, direction logproto.Direction, mint, maxt int64, pipeline log.StreamPipeline) iter.EntryIterator { // We are doing a copy everytime, this is because b.entries could change completely, // the alternate would be that we allocate a new b.entries everytime we cut a block, // but the tradeoff is that queries to near-realtime data would be much lower than @@ -278,18 +272,12 @@ func (hb *unorderedHeadBlock) Iterator(ctx context.Context, direction logproto.D streams[labels] = stream } - entry := logproto.Entry{ - Timestamp: time.Unix(0, ts), - Line: newLine, - } - - // Most of the time, there is no need to send back the structured metadata, as they are already part of the labels results. - // Still it might be needed for example when appending entries from one chunk into another one. 
- if iterOptions.KeepStructuredMetdata { - entry.StructuredMetadata = logproto.FromLabelsToLabelAdapters(hb.symbolizer.Lookup(structuredMetadataSymbols)) - } - - stream.Entries = append(stream.Entries, entry) + stream.Entries = append(stream.Entries, logproto.Entry{ + Timestamp: time.Unix(0, ts), + Line: newLine, + StructuredMetadata: logproto.FromLabelsToLabelAdapters(parsedLbs.StructuredMetadata()), + Parsed: logproto.FromLabelsToLabelAdapters(parsedLbs.Parsed()), + }) return nil }, ) diff --git a/pkg/chunkenc/unordered_test.go b/pkg/chunkenc/unordered_test.go index 31d7598d2ec85..d92c2f1054bb9 100644 --- a/pkg/chunkenc/unordered_test.go +++ b/pkg/chunkenc/unordered_test.go @@ -22,8 +22,9 @@ func iterEq(t *testing.T, exp []entry, got iter.EntryIterator) { var i int for got.Next() { require.Equal(t, logproto.Entry{ - Timestamp: time.Unix(0, exp[i].t), - Line: exp[i].s, + Timestamp: time.Unix(0, exp[i].t), + Line: exp[i].s, + StructuredMetadata: logproto.FromLabelsToLabelAdapters(exp[i].structuredMetadata), }, got.Entry()) require.Equal(t, exp[i].structuredMetadata.String(), got.Labels()) i++ @@ -445,22 +446,10 @@ func TestUnorderedChunkIterators(t *testing.T) { // ensure head block has data require.Equal(t, false, c.head.IsEmpty()) - forward, err := c.Iterator( - context.Background(), - time.Unix(0, 0), - time.Unix(100, 0), - logproto.FORWARD, - noopStreamPipeline, - ) + forward, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Unix(100, 0), logproto.FORWARD, noopStreamPipeline) require.Nil(t, err) - backward, err := c.Iterator( - context.Background(), - time.Unix(0, 0), - time.Unix(100, 0), - logproto.BACKWARD, - noopStreamPipeline, - ) + backward, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Unix(100, 0), logproto.BACKWARD, noopStreamPipeline) require.Nil(t, err) smpl := c.SampleIterator( diff --git a/pkg/compactor/retention/retention_test.go b/pkg/compactor/retention/retention_test.go index bdd0bd2186cf5..1814c14b76ea0 100644 --- a/pkg/compactor/retention/retention_test.go +++ b/pkg/compactor/retention/retention_test.go @@ -22,7 +22,6 @@ import ( "github.com/grafana/loki/pkg/chunkenc" ingesterclient "github.com/grafana/loki/pkg/ingester/client" - "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/log" "github.com/grafana/loki/pkg/storage/chunk" @@ -544,7 +543,7 @@ func TestChunkRewriter(t *testing.T) { require.Equal(t, expectedChunks[i][len(expectedChunks[i])-1].End, chunks[i].Through) lokiChunk := chunks[i].Data.(*chunkenc.Facade).LokiChunk() - newChunkItr, err := lokiChunk.Iterator(context.Background(), chunks[i].From.Time(), chunks[i].Through.Add(time.Minute).Time(), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}), iter.WithKeepStructuredMetadata()) + newChunkItr, err := lokiChunk.Iterator(context.Background(), chunks[i].From.Time(), chunks[i].Through.Add(time.Minute).Time(), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{})) require.NoError(t, err) for _, interval := range expectedChunks[i] { diff --git a/pkg/iter/entry_iterator.go b/pkg/iter/entry_iterator.go index 2d01de4d8fb6a..fa67da6a3bc0a 100644 --- a/pkg/iter/entry_iterator.go +++ b/pkg/iter/entry_iterator.go @@ -19,18 +19,6 @@ type EntryIterator interface { Entry() logproto.Entry } -type EntryIteratorOptions struct { - KeepStructuredMetdata bool -} - -type EntryIteratorOption func(*EntryIteratorOptions) - -func WithKeepStructuredMetadata() 
EntryIteratorOption {
-	return func(o *EntryIteratorOptions) {
-		o.KeepStructuredMetdata = true
-	}
-}
-
 // streamIterator iterates over entries in a stream.
 type streamIterator struct {
 	i int
diff --git a/pkg/loghttp/entry.go b/pkg/loghttp/entry.go
index 45e87d301e90f..2a55ac9ecd285 100644
--- a/pkg/loghttp/entry.go
+++ b/pkg/loghttp/entry.go
@@ -1,6 +1,7 @@
 package loghttp
 
 import (
+	"fmt"
 	"strconv"
 	"time"
 	"unsafe"
@@ -20,6 +21,7 @@ type Entry struct {
 	Timestamp          time.Time
 	Line               string
 	StructuredMetadata labels.Labels
+	Parsed             labels.Labels
 }
 
 func (e *Entry) UnmarshalJSON(data []byte) error {
@@ -52,26 +54,57 @@ func (e *Entry) UnmarshalJSON(data []byte) error {
 				return
 			}
 			e.Line = v
-		case 2: // labels
+		case 2: // structured metadata
 			if t != jsonparser.Object {
 				parseError = jsonparser.MalformedObjectError
 				return
 			}
+
+			// Here we deserialize entries for both query responses and push requests.
+			//
+			// For push requests, we accept structured metadata as the third object in the entry array. E.g.:
+			// [ "<ts>", "<line>", {"trace_id": "0242ac120002", "user_id": "superUser123"}]
+			//
+			// For query responses, we accept structured metadata and parsed labels in the third object in the entry array. E.g.:
+			// [ "<ts>", "<line>", { "structuredMetadata": {"trace_id": "0242ac120002", "user_id": "superUser123"}, "parsed": {"msg": "text"}}]
+			//
+			// Therefore, we need to check if the third object contains the "structuredMetadata" or "parsed" fields. If it does,
+			// we deserialize the inner objects into the structured metadata and parsed labels respectively.
+			// If it doesn't, we deserialize the object into the structured metadata labels.
 			var structuredMetadata labels.Labels
+			var parsed labels.Labels
 			if err := jsonparser.ObjectEach(value, func(key []byte, value []byte, dataType jsonparser.ValueType, _ int) error {
-				if dataType != jsonparser.String {
-					return jsonparser.MalformedStringError
+				if dataType == jsonparser.Object {
+					if string(key) == "structuredMetadata" {
+						lbls, err := parseLabels(value)
+						if err != nil {
+							return err
+						}
+						structuredMetadata = lbls
+					}
+					if string(key) == "parsed" {
+						lbls, err := parseLabels(value)
+						if err != nil {
+							return err
+						}
+						parsed = lbls
+					}
+					return nil
 				}
-				structuredMetadata = append(structuredMetadata, labels.Label{
-					Name:  string(key),
-					Value: string(value),
-				})
-				return nil
+				if dataType == jsonparser.String || dataType == jsonparser.Number {
+					structuredMetadata = append(structuredMetadata, labels.Label{
+						Name:  string(key),
+						Value: string(value),
+					})
+					return nil
+				}
+				return fmt.Errorf("could not parse structured metadata or parsed fields")
 			}); err != nil {
 				parseError = err
 				return
 			}
 			e.StructuredMetadata = structuredMetadata
+			e.Parsed = parsed
 		}
 		i++
 	})
@@ -81,6 +114,27 @@ func (e *Entry) UnmarshalJSON(data []byte) error {
 	return err
 }
 
+func parseLabels(data []byte) (labels.Labels, error) {
+	var lbls labels.Labels
+	err := jsonparser.ObjectEach(data, func(key []byte, value []byte, t jsonparser.ValueType, _ int) error {
+		if t != jsonparser.String && t != jsonparser.Number {
+			return fmt.Errorf("could not parse label value. 
Expected string or number, got %s", t) + } + + val, err := jsonparser.ParseString(value) + if err != nil { + return err + } + + lbls = append(lbls, labels.Label{ + Name: string(key), + Value: val, + }) + return nil + }) + return lbls, err +} + type jsonExtension struct { jsoniter.DummyExtension } diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go index c0447ea1157c9..617754393538c 100644 --- a/pkg/loghttp/query.go +++ b/pkg/loghttp/query.go @@ -240,7 +240,10 @@ func (s Streams) ToProto() []logproto.Stream { result := make([]logproto.Stream, 0, len(s)) for _, s := range s { entries := *(*[]logproto.Entry)(unsafe.Pointer(&s.Entries)) - result = append(result, logproto.Stream{Labels: s.Labels.String(), Entries: entries}) + result = append(result, logproto.Stream{ + Labels: s.Labels.String(), + Entries: entries, + }) } return result } diff --git a/pkg/loghttp/query_test.go b/pkg/loghttp/query_test.go index 157f7535d4a28..e94199352f12e 100644 --- a/pkg/loghttp/query_test.go +++ b/pkg/loghttp/query_test.go @@ -151,7 +151,7 @@ func TestStreams_ToProto(t *testing.T) { "some", []Stream{ { - Labels: map[string]string{"foo": "bar"}, + Labels: map[string]string{"job": "fake"}, Entries: []Entry{ {Timestamp: time.Unix(0, 1), Line: "1"}, {Timestamp: time.Unix(0, 2), Line: "2", StructuredMetadata: labels.Labels{ @@ -161,19 +161,20 @@ func TestStreams_ToProto(t *testing.T) { }, }, { - Labels: map[string]string{"foo": "bar", "lvl": "error"}, + Labels: map[string]string{"job": "fake", "lvl": "error"}, Entries: []Entry{ {Timestamp: time.Unix(0, 3), Line: "3"}, - {Timestamp: time.Unix(0, 4), Line: "4", StructuredMetadata: labels.Labels{ - {Name: "foo", Value: "a"}, - {Name: "bar", Value: "b"}, - }}, + {Timestamp: time.Unix(0, 4), Line: "4", + StructuredMetadata: labels.Labels{ + {Name: "foo", Value: "a"}, + {Name: "bar", Value: "b"}, + }}, }, }, }, []logproto.Stream{ { - Labels: `{foo="bar"}`, + Labels: `{job="fake"}`, Entries: []logproto.Entry{ {Timestamp: time.Unix(0, 1), Line: "1"}, {Timestamp: time.Unix(0, 2), Line: "2", StructuredMetadata: []logproto.LabelAdapter{ @@ -183,7 +184,7 @@ func TestStreams_ToProto(t *testing.T) { }, }, { - Labels: `{foo="bar", lvl="error"}`, + Labels: `{job="fake", lvl="error"}`, Entries: []logproto.Entry{ {Timestamp: time.Unix(0, 3), Line: "3"}, {Timestamp: time.Unix(0, 4), Line: "4", StructuredMetadata: []logproto.LabelAdapter{ diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index 4884415c7dd8d..7729f2941f884 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -291,8 +291,11 @@ func (q *query) Eval(ctx context.Context) (promql_parser.Value, error) { return nil, err } + encodingFlags := httpreq.ExtractEncodingFlagsFromCtx(ctx) + categorizeLabels := encodingFlags.Has(httpreq.FlagCategorizeLabels) + defer util.LogErrorWithContext(ctx, "closing iterator", iter.Close) - streams, err := readStreams(iter, q.params.Limit(), q.params.Direction(), q.params.Interval()) + streams, err := readStreams(iter, q.params.Limit(), q.params.Direction(), q.params.Interval(), categorizeLabels) return streams, err default: return nil, fmt.Errorf("unexpected type (%T): cannot evaluate", e) @@ -498,28 +501,55 @@ func PopulateMatrixFromScalar(data promql.Scalar, params Params) promql.Matrix { return promql.Matrix{series} } -func readStreams(i iter.EntryIterator, size uint32, dir logproto.Direction, interval time.Duration) (logqlmodel.Streams, error) { +// readStreams reads the streams from the iterator and returns them sorted. 
+// If categorizeLabels is true, the stream labels contain just the stream labels, and entries inside each stream have their
+// structuredMetadata and parsed fields populated with the structured metadata labels and the parsed labels respectively.
+// Otherwise, the stream labels are the whole series labels including the stream labels, structured metadata labels and parsed labels.
+func readStreams(i iter.EntryIterator, size uint32, dir logproto.Direction, interval time.Duration, categorizeLabels bool) (logqlmodel.Streams, error) {
 	streams := map[string]*logproto.Stream{}
 	respSize := uint32(0)
 	// lastEntry should be a really old time so that the first comparison is always true, we use a negative
 	// value here because many unit tests start at time.Unix(0,0)
 	lastEntry := lastEntryMinTime
 	for respSize < size && i.Next() {
-		labels, entry := i.Labels(), i.Entry()
+		entry := i.Entry()
+
 		forwardShouldOutput := dir == logproto.FORWARD &&
-			(i.Entry().Timestamp.Equal(lastEntry.Add(interval)) || i.Entry().Timestamp.After(lastEntry.Add(interval)))
+			(entry.Timestamp.Equal(lastEntry.Add(interval)) || entry.Timestamp.After(lastEntry.Add(interval)))
 		backwardShouldOutput := dir == logproto.BACKWARD &&
-			(i.Entry().Timestamp.Equal(lastEntry.Add(-interval)) || i.Entry().Timestamp.Before(lastEntry.Add(-interval)))
+			(entry.Timestamp.Equal(lastEntry.Add(-interval)) || entry.Timestamp.Before(lastEntry.Add(-interval)))
+
 		// If step == 0 output every line.
 		// If lastEntry.Unix < 0 this is the first pass through the loop and we should output the line.
 		// Then check to see if the entry is equal to, or past a forward or reverse step
 		if interval == 0 || lastEntry.Unix() < 0 || forwardShouldOutput || backwardShouldOutput {
-			stream, ok := streams[labels]
+			streamLabels := i.Labels()
+
+			// If categorizeLabels is true, we need to remove the structured metadata labels and parsed labels from the stream labels.
+			// TODO(salvacorts): If this is too slow, provided this is in the hot path, we can consider doing this in the iterator.
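+			// For illustration (hypothetical values, not taken from this change): given series labels
+			// {app="foo", traceID="123", msg="hi"} where traceID is structured metadata and msg comes
+			// from a parser stage, the stream labels become {app="foo"} while the entry keeps traceID
+			// and msg in its structuredMetadata and parsed fields.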
+ if categorizeLabels && (len(entry.StructuredMetadata) > 0 || len(entry.Parsed) > 0) { + lbls, err := syntax.ParseLabels(streamLabels) + if err != nil { + return nil, fmt.Errorf("failed to parse series labels to categorize labels: %w", err) + } + + builder := labels.NewBuilder(lbls) + for _, label := range entry.StructuredMetadata { + builder.Del(label.Name) + } + for _, label := range entry.Parsed { + builder.Del(label.Name) + } + + streamLabels = builder.Labels().String() + } + + stream, ok := streams[streamLabels] if !ok { stream = &logproto.Stream{ - Labels: labels, + Labels: streamLabels, } - streams[labels] = stream + streams[streamLabels] = stream } stream.Entries = append(stream.Entries, entry) lastEntry = i.Entry().Timestamp diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index c5a1c7235492a..94f4687c35279 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -581,7 +581,7 @@ func TestEngine_LogsInstantQuery(t *testing.T) { {T: 60 * 1000, F: 1.2, Metric: labels.FromStrings("app", "fuzz")}, }, }, - //sort and sort_desc + // sort and sort_desc { `sort(rate(({app=~"foo|bar"} |~".+bar")[1m])) + 1`, time.Unix(60, 0), logproto.FORWARD, 100, [][]logproto.Series{ @@ -1575,7 +1575,7 @@ func TestEngine_RangeQuery(t *testing.T) { }, promql.Matrix{ promql.Series{ - //vector result + // vector result Metric: labels.Labels(nil), Floats: []promql.FPoint{{T: 60000, F: 0}, {T: 80000, F: 0}, {T: 100000, F: 0}, {T: 120000, F: 0}, {T: 140000, F: 0}, {T: 160000, F: 0}, {T: 180000, F: 0}}}, promql.Series{ diff --git a/pkg/logql/log/fmt.go b/pkg/logql/log/fmt.go index 3d79118796f19..e28f5a119a48b 100644 --- a/pkg/logql/log/fmt.go +++ b/pkg/logql/log/fmt.go @@ -383,9 +383,9 @@ func (lf *LabelsFormatter) Process(ts int64, l []byte, lbs *LabelsBuilder) ([]by var data interface{} for _, f := range lf.formats { if f.Rename { - v, ok := lbs.Get(f.Value) + v, category, ok := lbs.GetWithCategory(f.Value) if ok { - lbs.Set(f.Name, v) + lbs.Set(category, f.Name, v) lbs.Del(f.Value) } continue @@ -399,7 +399,7 @@ func (lf *LabelsFormatter) Process(ts int64, l []byte, lbs *LabelsBuilder) ([]by lbs.SetErrorDetails(err.Error()) continue } - lbs.Set(f.Name, lf.buf.String()) + lbs.Set(ParsedLabel, f.Name, lf.buf.String()) } return l, true } diff --git a/pkg/logql/log/ip.go b/pkg/logql/log/ip.go index 81359d2f12acd..cd803e820c10c 100644 --- a/pkg/logql/log/ip.go +++ b/pkg/logql/log/ip.go @@ -3,9 +3,8 @@ package log import ( "errors" "fmt" - "unicode" - "net/netip" + "unicode" "github.com/prometheus/prometheus/model/labels" "go4.org/netipx" diff --git a/pkg/logql/log/labels.go b/pkg/logql/log/labels.go index b9f8f88a213c3..7bc313c8c302c 100644 --- a/pkg/logql/log/labels.go +++ b/pkg/logql/log/labels.go @@ -11,25 +11,38 @@ import ( const MaxInternedStrings = 1024 -var EmptyLabelsResult = NewLabelsResult(labels.Labels{}, labels.Labels{}.Hash()) +var EmptyLabelsResult = NewLabelsResult(labels.EmptyLabels().String(), labels.EmptyLabels().Hash(), labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels()) // LabelsResult is a computed labels result that contains the labels set with associated string and hash. // The is mainly used for caching and returning labels computations out of pipelines and stages. type LabelsResult interface { String() string Labels() labels.Labels + Stream() labels.Labels + StructuredMetadata() labels.Labels + Parsed() labels.Labels Hash() uint64 } -// NewLabelsResult creates a new LabelsResult from a labels set and a hash. 
-func NewLabelsResult(lbs labels.Labels, hash uint64) LabelsResult { - return &labelsResult{lbs: lbs, s: lbs.String(), h: hash} +// NewLabelsResult creates a new LabelsResult. +// It takes the string representation of the labels, the hash of the labels and the labels categorized. +func NewLabelsResult(allLabelsStr string, hash uint64, stream, structuredMetadata, parsed labels.Labels) LabelsResult { + return &labelsResult{ + s: allLabelsStr, + h: hash, + stream: stream, + structuredMetadata: structuredMetadata, + parsed: parsed, + } } type labelsResult struct { - lbs labels.Labels - s string - h uint64 + s string + h uint64 + + stream labels.Labels + structuredMetadata labels.Labels + parsed labels.Labels } func (l labelsResult) String() string { @@ -37,13 +50,34 @@ func (l labelsResult) String() string { } func (l labelsResult) Labels() labels.Labels { - return l.lbs + return flattenLabels(nil, l.stream, l.structuredMetadata, l.parsed) } func (l labelsResult) Hash() uint64 { return l.h } +func (l labelsResult) Stream() labels.Labels { + if len(l.stream) == 0 { + return nil + } + return l.stream +} + +func (l labelsResult) StructuredMetadata() labels.Labels { + if len(l.structuredMetadata) == 0 { + return nil + } + return l.structuredMetadata +} + +func (l labelsResult) Parsed() labels.Labels { + if len(l.parsed) == 0 { + return nil + } + return l.parsed +} + type hasher struct { buf []byte // buffer for computing hash without bytes slice allocation. } @@ -62,11 +96,37 @@ func (h *hasher) Hash(lbs labels.Labels) uint64 { return hash } +type LabelCategory int + +const ( + StreamLabel LabelCategory = iota + StructuredMetadataLabel + ParsedLabel + InvalidCategory + + numValidCategories = 3 +) + +var allCategories = []LabelCategory{ + StreamLabel, + StructuredMetadataLabel, + ParsedLabel, +} + +func categoriesContain(categories []LabelCategory, category LabelCategory) bool { + for _, c := range categories { + if c == category { + return true + } + } + return false +} + // BaseLabelsBuilder is a label builder used by pipeline and stages. // Only one base builder is used and it contains cache for each LabelsBuilders. type BaseLabelsBuilder struct { del []string - add []labels.Label + add [numValidCategories]labels.Labels // nolint:structcheck // https://github.com/golangci/golangci-lint/issues/826 err string @@ -98,9 +158,14 @@ func NewBaseLabelsBuilderWithGrouping(groups []string, parserKeyHints ParserHint parserKeyHints = noParserHints } + const labelsCapacity = 16 return &BaseLabelsBuilder{ - del: make([]string, 0, 5), - add: make([]labels.Label, 0, 16), + del: make([]string, 0, 5), + add: [numValidCategories]labels.Labels{ + StreamLabel: make(labels.Labels, 0, labelsCapacity), + StructuredMetadataLabel: make(labels.Labels, 0, labelsCapacity), + ParsedLabel: make(labels.Labels, 0, labelsCapacity), + }, resultCache: make(map[uint64]LabelsResult), hasher: newHasher(), groups: groups, @@ -110,7 +175,7 @@ func NewBaseLabelsBuilderWithGrouping(groups []string, parserKeyHints ParserHint } } -// NewLabelsBuilder creates a new base labels builder. +// NewBaseLabelsBuilder creates a new base labels builder. 
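+//
+// A minimal usage sketch, mirroring the tests in this change (names and values are illustrative):
+//
+//	b := NewBaseLabelsBuilder().ForLabels(lbs, lbs.Hash())
+//	b.Set(StructuredMetadataLabel, "traceID", "123")
+//	res := b.LabelsResult() // res.Stream(), res.StructuredMetadata(), res.Parsed()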
func NewBaseLabelsBuilder() *BaseLabelsBuilder { return NewBaseLabelsBuilderWithGrouping(nil, noParserHints, false, false) } @@ -126,7 +191,7 @@ func (b *BaseLabelsBuilder) ForLabels(lbs labels.Labels, hash uint64) *LabelsBui } return res } - labelResult := NewLabelsResult(lbs, hash) + labelResult := NewLabelsResult(lbs.String(), hash, lbs, labels.EmptyLabels(), labels.EmptyLabels()) b.resultCache[hash] = labelResult res := &LabelsBuilder{ base: lbs, @@ -139,7 +204,9 @@ func (b *BaseLabelsBuilder) ForLabels(lbs labels.Labels, hash uint64) *LabelsBui // Reset clears all current state for the builder. func (b *BaseLabelsBuilder) Reset() { b.del = b.del[:0] - b.add = b.add[:0] + for k := range b.add { + b.add[k] = b.add[k][:0] + } b.err = "" b.errDetails = "" b.parserKeyHints.Reset() @@ -151,6 +218,27 @@ func (b *BaseLabelsBuilder) ParserLabelHints() ParserHint { return b.parserKeyHints } +func (b *BaseLabelsBuilder) hasDel() bool { + return len(b.del) > 0 +} + +func (b *BaseLabelsBuilder) hasAdd() bool { + for _, lbls := range b.add { + if len(lbls) > 0 { + return true + } + } + return false +} + +func (b *BaseLabelsBuilder) sizeAdd() int { + var length int + for _, lbls := range b.add { + length += len(lbls) + } + return length +} + // SetErr sets the error label. func (b *LabelsBuilder) SetErr(err string) *LabelsBuilder { b.err = err @@ -195,33 +283,42 @@ func (b *LabelsBuilder) BaseHas(key string) bool { return b.base.Has(key) } -// Get returns the value of a labels key if it exists. -func (b *LabelsBuilder) Get(key string) (string, bool) { - for _, a := range b.add { - if a.Name == key { - return a.Value, true +// GetWithCategory returns the value and the category of a labels key if it exists. +func (b *LabelsBuilder) GetWithCategory(key string) (string, LabelCategory, bool) { + for category, lbls := range b.add { + for _, l := range lbls { + if l.Name == key { + return l.Value, LabelCategory(category), true + } } } for _, d := range b.del { if d == key { - return "", false + return "", InvalidCategory, false } } for _, l := range b.base { if l.Name == key { - return l.Value, true + return l.Value, StreamLabel, true } } - return "", false + return "", InvalidCategory, false +} + +func (b *LabelsBuilder) Get(key string) (string, bool) { + v, _, ok := b.GetWithCategory(key) + return v, ok } // Del deletes the label of the given name. func (b *LabelsBuilder) Del(ns ...string) *LabelsBuilder { for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) + for category, lbls := range b.add { + for i, a := range lbls { + if a.Name == n { + b.add[category] = append(lbls[:i], lbls[i+1:]...) + } } } b.del = append(b.del, n) @@ -230,14 +327,14 @@ func (b *LabelsBuilder) Del(ns ...string) *LabelsBuilder { } // Set the name/value pair as a label. -func (b *LabelsBuilder) Set(n, v string) *LabelsBuilder { - for i, a := range b.add { +func (b *LabelsBuilder) Set(category LabelCategory, n, v string) *LabelsBuilder { + for i, a := range b.add[category] { if a.Name == n { - b.add[i].Value = v + b.add[category][i].Value = v return b } } - b.add = append(b.add, labels.Label{Name: n, Value: v}) + b.add[category] = append(b.add[category], labels.Label{Name: n, Value: v}) // Sometimes labels are set and later modified. Only record // each label once @@ -247,73 +344,101 @@ func (b *LabelsBuilder) Set(n, v string) *LabelsBuilder { // Add the labels to the builder. 
If a label with the same name // already exists in the base labels, a suffix is added to the name. -func (b *LabelsBuilder) Add(labels ...labels.Label) *LabelsBuilder { +func (b *LabelsBuilder) Add(category LabelCategory, labels ...labels.Label) *LabelsBuilder { for _, l := range labels { name := l.Name if b.BaseHas(name) { name = fmt.Sprintf("%s%s", name, duplicateSuffix) } - b.Set(name, l.Value) + b.Set(category, name, l.Value) } return b } // Labels returns the labels from the builder. If no modifications // were made, the original labels are returned. -func (b *LabelsBuilder) labels() labels.Labels { - b.buf = b.UnsortedLabels(b.buf) +func (b *LabelsBuilder) labels(categories ...LabelCategory) labels.Labels { + b.buf = b.UnsortedLabels(b.buf, categories...) sort.Sort(b.buf) return b.buf } func (b *LabelsBuilder) appendErrors(buf labels.Labels) labels.Labels { if b.err != "" { - buf = append(buf, labels.Label{Name: logqlmodel.ErrorLabel, Value: b.err}) + buf = append(buf, labels.Label{ + Name: logqlmodel.ErrorLabel, + Value: b.err, + }) } if b.errDetails != "" { - buf = append(buf, labels.Label{Name: logqlmodel.ErrorDetailsLabel, Value: b.errDetails}) + buf = append(buf, labels.Label{ + Name: logqlmodel.ErrorDetailsLabel, + Value: b.errDetails, + }) } return buf } -func (b *LabelsBuilder) UnsortedLabels(buf labels.Labels) labels.Labels { - if len(b.del) == 0 && len(b.add) == 0 { +func (b *LabelsBuilder) UnsortedLabels(buf labels.Labels, categories ...LabelCategory) labels.Labels { + if categories == nil { + categories = allCategories + } + + if !b.hasDel() && !b.hasAdd() && categoriesContain(categories, StreamLabel) { if buf == nil { - buf = make(labels.Labels, 0, len(b.base)+1) + buf = make(labels.Labels, 0, len(b.base)+1) // +1 for error label. } else { buf = buf[:0] } buf = append(buf, b.base...) - return b.appendErrors(buf) + if categoriesContain(categories, ParsedLabel) { + buf = b.appendErrors(buf) + } + + return buf } // In the general case, labels are removed, modified or moved // rather than added. if buf == nil { - buf = make(labels.Labels, 0, len(b.base)+len(b.add)+1) + size := len(b.base) + b.sizeAdd() + 1 + buf = make(labels.Labels, 0, size) } else { buf = buf[:0] } -Outer: - for _, l := range b.base { - for _, n := range b.del { - if l.Name == n { - continue Outer + if categoriesContain(categories, StreamLabel) { + Outer: + for _, l := range b.base { + // Skip stream labels to be deleted + for _, n := range b.del { + if l.Name == n { + continue Outer + } } - } - for _, la := range b.add { - if l.Name == la.Name { - continue Outer + // Skip stream labels which value will be replaced + for _, lbls := range b.add { + for _, la := range lbls { + if l.Name == la.Name { + continue Outer + } + } } + buf = append(buf, l) } - buf = append(buf, l) } - buf = append(buf, b.add...) - return b.appendErrors(buf) + + for _, category := range categories { + buf = append(buf, b.add[category]...) + } + if (b.HasErr() || b.HasErrorDetails()) && categoriesContain(categories, ParsedLabel) { + buf = b.appendErrors(buf) + } + + return buf } func (b *LabelsBuilder) Map() map[string]string { - if len(b.del) == 0 && len(b.add) == 0 && b.err == "" { + if !b.hasDel() && !b.hasAdd() && !b.HasErr() { if b.baseMap == nil { b.baseMap = b.base.Map() } @@ -333,18 +458,51 @@ func (b *LabelsBuilder) Map() map[string]string { // No grouping is applied and the cache is used when possible. func (b *LabelsBuilder) LabelsResult() LabelsResult { // unchanged path. 
- if len(b.del) == 0 && len(b.add) == 0 && b.err == "" { + if !b.hasDel() && !b.hasAdd() && !b.HasErr() { return b.currentResult } - return b.toResult(b.labels()) + + stream := b.labels(StreamLabel).Copy() + structuredMetadata := b.labels(StructuredMetadataLabel).Copy() + parsed := b.labels(ParsedLabel).Copy() + b.buf = flattenLabels(b.buf, stream, structuredMetadata, parsed) + hash := b.hasher.Hash(b.buf) + if cached, ok := b.resultCache[hash]; ok { + return cached + } + + result := NewLabelsResult(b.buf.String(), hash, stream, structuredMetadata, parsed) + b.resultCache[hash] = result + + return result } -func (b *BaseLabelsBuilder) toResult(buf labels.Labels) LabelsResult { +func flattenLabels(buf labels.Labels, many ...labels.Labels) labels.Labels { + var size int + for _, lbls := range many { + size += len(lbls) + } + + if buf == nil || cap(buf) < size { + buf = make(labels.Labels, 0, size) + } else { + buf = buf[:0] + } + + for _, lbls := range many { + buf = append(buf, lbls...) + } + sort.Sort(buf) + return buf +} + +func (b *BaseLabelsBuilder) toUncategorizedResult(buf labels.Labels) LabelsResult { hash := b.hasher.Hash(buf) if cached, ok := b.resultCache[hash]; ok { return cached } - res := NewLabelsResult(buf.Copy(), hash) + + res := NewLabelsResult(buf.String(), hash, buf.Copy(), nil, nil) b.resultCache[hash] = res return res } @@ -352,7 +510,7 @@ func (b *BaseLabelsBuilder) toResult(buf labels.Labels) LabelsResult { // GroupedLabels returns the LabelsResult from the builder. // Groups are applied and the cache is used when possible. func (b *LabelsBuilder) GroupedLabels() LabelsResult { - if b.err != "" { + if b.HasErr() { // We need to return now before applying grouping otherwise the error might get lost. return b.LabelsResult() } @@ -360,7 +518,7 @@ func (b *LabelsBuilder) GroupedLabels() LabelsResult { return EmptyLabelsResult } // unchanged path. 
- if len(b.del) == 0 && len(b.add) == 0 { + if !b.hasDel() && !b.hasAdd() { if len(b.groups) == 0 { return b.currentResult } @@ -391,9 +549,11 @@ Outer: } } for _, la := range b.add { - if g == la.Name { - b.buf = append(b.buf, la) - continue Outer + for _, l := range la { + if g == l.Name { + b.buf = append(b.buf, l) + continue Outer + } } } for _, l := range b.base { @@ -403,12 +563,12 @@ Outer: } } } - return b.toResult(b.buf) + return b.toUncategorizedResult(b.buf) } func (b *LabelsBuilder) withoutResult() LabelsResult { if b.buf == nil { - size := len(b.base) + len(b.add) - len(b.del) - len(b.groups) + size := len(b.base) + b.sizeAdd() - len(b.del) - len(b.groups) if size < 0 { size = 0 } @@ -423,9 +583,11 @@ Outer: continue Outer } } - for _, la := range b.add { - if l.Name == la.Name { - continue Outer + for _, lbls := range b.add { + for _, la := range lbls { + if l.Name == la.Name { + continue Outer + } } } for _, lg := range b.groups { @@ -435,17 +597,20 @@ Outer: } b.buf = append(b.buf, l) } -OuterAdd: - for _, la := range b.add { - for _, lg := range b.groups { - if la.Name == lg { - continue OuterAdd + + for _, lbls := range b.add { + OuterAdd: + for _, la := range lbls { + for _, lg := range b.groups { + if la.Name == lg { + continue OuterAdd + } } + b.buf = append(b.buf, la) } - b.buf = append(b.buf, la) } sort.Sort(b.buf) - return b.toResult(b.buf) + return b.toUncategorizedResult(b.buf) } func (b *LabelsBuilder) toBaseGroup() LabelsResult { @@ -458,7 +623,7 @@ func (b *LabelsBuilder) toBaseGroup() LabelsResult { } else { lbs = labels.NewBuilder(b.base).Keep(b.groups...).Labels() } - res := NewLabelsResult(lbs, lbs.Hash()) + res := NewLabelsResult(lbs.String(), lbs.Hash(), lbs, nil, nil) b.groupedResult = res return res } diff --git a/pkg/logql/log/labels_test.go b/pkg/logql/log/labels_test.go index c31296100c6fc..25cebfc5ffa9b 100644 --- a/pkg/logql/log/labels_test.go +++ b/pkg/logql/log/labels_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/logqlmodel" @@ -13,22 +14,24 @@ func TestLabelsBuilder_Get(t *testing.T) { lbs := labels.FromStrings("already", "in") b := NewBaseLabelsBuilder().ForLabels(lbs, lbs.Hash()) b.Reset() - b.Set("foo", "bar") - b.Set("bar", "buzz") + b.Set(StructuredMetadataLabel, "foo", "bar") + b.Set(ParsedLabel, "bar", "buzz") b.Del("foo") - _, ok := b.Get("foo") + _, _, ok := b.GetWithCategory("foo") require.False(t, ok) - v, ok := b.Get("bar") + v, category, ok := b.GetWithCategory("bar") require.True(t, ok) require.Equal(t, "buzz", v) - v, ok = b.Get("already") + require.Equal(t, ParsedLabel, category) + v, category, ok = b.GetWithCategory("already") require.True(t, ok) require.Equal(t, "in", v) + require.Equal(t, StreamLabel, category) b.Del("bar") - _, ok = b.Get("bar") + _, _, ok = b.GetWithCategory("bar") require.False(t, ok) b.Del("already") - _, ok = b.Get("already") + _, _, ok = b.GetWithCategory("already") require.False(t, ok) } @@ -37,22 +40,30 @@ func TestLabelsBuilder_LabelsError(t *testing.T) { b := NewBaseLabelsBuilder().ForLabels(lbs, lbs.Hash()) b.Reset() b.SetErr("err") - lbsWithErr := b.LabelsResult().Labels() - require.Equal( - t, - labels.FromStrings(logqlmodel.ErrorLabel, "err", - "already", "in", - ), - lbsWithErr, + lbsWithErr := b.LabelsResult() + + expectedLbs := labels.FromStrings( + logqlmodel.ErrorLabel, "err", + "already", "in", ) + 
require.Equal(t, expectedLbs, lbsWithErr.Labels()) + require.Equal(t, expectedLbs.String(), lbsWithErr.String()) + require.Equal(t, expectedLbs.Hash(), lbsWithErr.Hash()) + require.Equal(t, labels.FromStrings("already", "in"), lbsWithErr.Stream()) + require.Nil(t, lbsWithErr.StructuredMetadata()) + require.Equal(t, labels.FromStrings(logqlmodel.ErrorLabel, "err"), lbsWithErr.Parsed()) + // make sure the original labels is unchanged. require.Equal(t, labels.FromStrings("already", "in"), lbs) } func TestLabelsBuilder_LabelsResult(t *testing.T) { - strs := []string{"namespace", "loki", + strs := []string{ + "namespace", "loki", "job", "us-central1/loki", - "cluster", "us-central1"} + "cluster", "us-central1", + "ToReplace", "text", + } lbs := labels.FromStrings(strs...) b := NewBaseLabelsBuilder().ForLabels(lbs, lbs.Hash()) b.Reset() @@ -61,19 +72,38 @@ func TestLabelsBuilder_LabelsResult(t *testing.T) { withErr := labels.FromStrings(append(strs, logqlmodel.ErrorLabel, "err")...) assertLabelResult(t, withErr, b.LabelsResult()) - b.Set("foo", "bar") - b.Set("namespace", "tempo") - b.Set("buzz", "fuzz") + b.Set(StructuredMetadataLabel, "foo", "bar") + b.Set(StreamLabel, "namespace", "tempo") + b.Set(ParsedLabel, "buzz", "fuzz") + b.Set(ParsedLabel, "ToReplace", "other") b.Del("job") - expected := labels.FromStrings(logqlmodel.ErrorLabel, "err", + + expectedStreamLbls := labels.FromStrings( "namespace", "tempo", "cluster", "us-central1", + ) + expectedStucturedMetadataLbls := labels.FromStrings( "foo", "bar", + ) + expectedParsedLbls := labels.FromStrings( + logqlmodel.ErrorLabel, "err", "buzz", "fuzz", + "ToReplace", "other", ) + expected := make(labels.Labels, 0, len(expectedStreamLbls)+len(expectedStucturedMetadataLbls)+len(expectedParsedLbls)) + expected = append(expected, expectedStreamLbls...) + expected = append(expected, expectedStucturedMetadataLbls...) + expected = append(expected, expectedParsedLbls...) + expected = labels.New(expected...) + assertLabelResult(t, expected, b.LabelsResult()) // cached. 
assertLabelResult(t, expected, b.LabelsResult()) + + actual := b.LabelsResult() + assert.Equal(t, expectedStreamLbls, actual.Stream()) + assert.Equal(t, expectedStucturedMetadataLbls, actual.StructuredMetadata()) + assert.Equal(t, expectedParsedLbls, actual.Parsed()) } func TestLabelsBuilder_GroupedLabelsResult(t *testing.T) { @@ -89,9 +119,9 @@ func TestLabelsBuilder_GroupedLabelsResult(t *testing.T) { assertLabelResult(t, withErr, b.GroupedLabels()) b.Reset() - b.Set("foo", "bar") - b.Set("namespace", "tempo") - b.Set("buzz", "fuzz") + b.Set(StructuredMetadataLabel, "foo", "bar") + b.Set(StreamLabel, "namespace", "tempo") + b.Set(ParsedLabel, "buzz", "fuzz") b.Del("job") expected := labels.FromStrings("namespace", "tempo") assertLabelResult(t, expected, b.GroupedLabels()) @@ -104,13 +134,13 @@ func TestLabelsBuilder_GroupedLabelsResult(t *testing.T) { b.Del("job") assertLabelResult(t, labels.EmptyLabels(), b.GroupedLabels()) b.Reset() - b.Set("namespace", "tempo") + b.Set(StreamLabel, "namespace", "tempo") assertLabelResult(t, labels.FromStrings("job", "us-central1/loki"), b.GroupedLabels()) b = NewBaseLabelsBuilderWithGrouping([]string{"job"}, nil, true, false).ForLabels(lbs, lbs.Hash()) b.Del("job") - b.Set("foo", "bar") - b.Set("job", "something") + b.Set(StructuredMetadataLabel, "foo", "bar") + b.Set(StreamLabel, "job", "something") expected = labels.FromStrings("namespace", "loki", "cluster", "us-central1", "foo", "bar", @@ -118,8 +148,8 @@ func TestLabelsBuilder_GroupedLabelsResult(t *testing.T) { assertLabelResult(t, expected, b.GroupedLabels()) b = NewBaseLabelsBuilderWithGrouping(nil, nil, false, false).ForLabels(lbs, lbs.Hash()) - b.Set("foo", "bar") - b.Set("job", "something") + b.Set(StructuredMetadataLabel, "foo", "bar") + b.Set(StreamLabel, "job", "something") expected = labels.FromStrings("namespace", "loki", "job", "something", "cluster", "us-central1", diff --git a/pkg/logql/log/metrics_extraction.go b/pkg/logql/log/metrics_extraction.go index ea900e2cf5b59..cd4ef3b8e7af7 100644 --- a/pkg/logql/log/metrics_extraction.go +++ b/pkg/logql/log/metrics_extraction.go @@ -82,7 +82,7 @@ type streamLineSampleExtractor struct { func (l *streamLineSampleExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) { l.builder.Reset() - l.builder.Add(structuredMetadata...) + l.builder.Add(StructuredMetadataLabel, structuredMetadata...) // short circuit. if l.Stage == NoopStage { @@ -174,7 +174,7 @@ func (l *labelSampleExtractor) ForStream(labels labels.Labels) StreamSampleExtra func (l *streamLabelSampleExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) { // Apply the pipeline first. l.builder.Reset() - l.builder.Add(structuredMetadata...) + l.builder.Add(StructuredMetadataLabel, structuredMetadata...) 
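+	// The pre-stage pipeline below then sees the structured metadata under its own category, separate from parsed labels.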
line, ok := l.preStage.Process(ts, line, l.builder) if !ok { return 0, nil, false diff --git a/pkg/logql/log/parser.go b/pkg/logql/log/parser.go index e1018d2dbeb6d..be059a2831560 100644 --- a/pkg/logql/log/parser.go +++ b/pkg/logql/log/parser.go @@ -136,7 +136,7 @@ func (j *JSONParser) parseLabelValue(key, value []byte, dataType jsonparser.Valu if !ok { return nil } - j.lbs.Set(key, readValue(value, dataType)) + j.lbs.Set(ParsedLabel, key, readValue(value, dataType)) if !j.parserHints.ShouldContinueParsingLine(key, j.lbs) { return errLabelDoesNotMatch } @@ -166,7 +166,7 @@ func (j *JSONParser) parseLabelValue(key, value []byte, dataType jsonparser.Valu return nil } - j.lbs.Set(keyString, readValue(value, dataType)) + j.lbs.Set(ParsedLabel, keyString, readValue(value, dataType)) if !j.parserHints.ShouldContinueParsingLine(keyString, j.lbs) { return errLabelDoesNotMatch } @@ -272,7 +272,7 @@ func (r *RegexpParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte continue } - lbs.Set(key, string(value)) + lbs.Set(ParsedLabel, key, string(value)) if !parserHints.ShouldContinueParsingLine(key, lbs) { return line, false } @@ -348,7 +348,7 @@ func (l *LogfmtParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte continue } - lbs.Set(key, string(val)) + lbs.Set(ParsedLabel, key, string(val)) if !parserHints.ShouldContinueParsingLine(key, lbs) { return line, false } @@ -410,7 +410,7 @@ func (l *PatternParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byt continue } - lbs.Set(name, string(m)) + lbs.Set(ParsedLabel, name, string(m)) if !parserHints.ShouldContinueParsingLine(name, lbs) { return line, false } @@ -469,7 +469,7 @@ func (l *LogfmtExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilde for id, paths := range l.expressions { keys[id] = fmt.Sprintf("%v", paths...) if !lbs.BaseHas(id) { - lbs.Set(id, "") + lbs.Set(ParsedLabel, id, "") } } @@ -523,7 +523,7 @@ func (l *LogfmtExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilde } } - lbs.Set(key, string(val)) + lbs.Set(ParsedLabel, key, string(val)) if lbs.ParserLabelHints().AllRequiredExtracted() { break @@ -613,9 +613,9 @@ func (j *JSONExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilder) switch typ { case jsonparser.Null: - lbs.Set(key, "") + lbs.Set(ParsedLabel, key, "") default: - lbs.Set(key, unescapeJSONString(data)) + lbs.Set(ParsedLabel, key, unescapeJSONString(data)) } matches++ @@ -625,7 +625,7 @@ func (j *JSONExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilder) if matches < len(j.ids) { for _, id := range j.ids { if _, ok := lbs.Get(id); !ok { - lbs.Set(id, "") + lbs.Set(ParsedLabel, id, "") } } } @@ -695,7 +695,7 @@ func addErrLabel(msg string, err error, lbs *LabelsBuilder) { } if lbs.ParserLabelHints().PreserveError() { - lbs.Set(logqlmodel.PreserveErrorLabel, "true") + lbs.Set(ParsedLabel, logqlmodel.PreserveErrorLabel, "true") } } @@ -746,7 +746,7 @@ func (u *UnpackParser) unpack(entry []byte, lbs *LabelsBuilder) ([]byte, error) // flush the buffer if we found a packed entry. 
if isPacked { for i := 0; i < len(u.lbsBuffer); i = i + 2 { - lbs.Set(u.lbsBuffer[i], u.lbsBuffer[i+1]) + lbs.Set(ParsedLabel, u.lbsBuffer[i], u.lbsBuffer[i+1]) if !lbs.ParserLabelHints().ShouldContinueParsingLine(u.lbsBuffer[i], lbs) { return entry, errLabelDoesNotMatch } diff --git a/pkg/logql/log/parser_test.go b/pkg/logql/log/parser_test.go index a81748775304f..246dbed499c9e 100644 --- a/pkg/logql/log/parser_test.go +++ b/pkg/logql/log/parser_test.go @@ -187,8 +187,9 @@ func TestLabelShortCircuit(t *testing.T) { _, result = tt.p.Process(0, tt.line, lbs) require.Len(t, lbs.labels(), 1) - name, ok := lbs.Get("name") + name, category, ok := lbs.GetWithCategory("name") require.True(t, ok) + require.Equal(t, ParsedLabel, category) require.Contains(t, name, "text1") }) } diff --git a/pkg/logql/log/pipeline.go b/pkg/logql/log/pipeline.go index e504370e7a262..31665e7b303ae 100644 --- a/pkg/logql/log/pipeline.go +++ b/pkg/logql/log/pipeline.go @@ -89,7 +89,7 @@ type noopStreamPipeline struct { func (n noopStreamPipeline) Process(_ int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) { n.builder.Reset() - n.builder.Add(structuredMetadata...) + n.builder.Add(StructuredMetadataLabel, structuredMetadata...) return line, n.builder.LabelsResult(), true } @@ -204,7 +204,7 @@ func (p *pipeline) Reset() { func (p *streamPipeline) Process(ts int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) { var ok bool p.builder.Reset() - p.builder.Add(structuredMetadata...) + p.builder.Add(StructuredMetadataLabel, structuredMetadata...) for _, s := range p.stages { line, ok = s.Process(ts, line, p.builder) diff --git a/pkg/logql/log/pipeline_test.go b/pkg/logql/log/pipeline_test.go index a85c84434179c..5a6f40fa76995 100644 --- a/pkg/logql/log/pipeline_test.go +++ b/pkg/logql/log/pipeline_test.go @@ -16,39 +16,44 @@ func TestNoopPipeline(t *testing.T) { l, lbr, matches := pipeline.ForStream(lbs).Process(0, []byte("")) require.Equal(t, []byte(""), l) - require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) + require.Equal(t, NewLabelsResult(lbs.String(), lbs.Hash(), lbs, labels.EmptyLabels(), labels.EmptyLabels()), lbr) + require.Equal(t, lbs.Hash(), lbr.Hash()) + require.Equal(t, lbs.String(), lbr.String()) require.Equal(t, true, matches) ls, lbr, matches := pipeline.ForStream(lbs).ProcessString(0, "") require.Equal(t, "", ls) - require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) + require.Equal(t, NewLabelsResult(lbs.String(), lbs.Hash(), lbs, labels.EmptyLabels(), labels.EmptyLabels()), lbr) + require.Equal(t, lbs.Hash(), lbr.Hash()) + require.Equal(t, lbs.String(), lbr.String()) require.Equal(t, true, matches) - structuredMetadata := labels.Labels{ - {Name: "y", Value: "1"}, - {Name: "z", Value: "2"}, - } + structuredMetadata := labels.FromStrings("y", "1", "z", "2") expectedLabelsResults := append(lbs, structuredMetadata...) l, lbr, matches = pipeline.ForStream(lbs).Process(0, []byte(""), structuredMetadata...) require.Equal(t, []byte(""), l) - require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr) + require.Equal(t, NewLabelsResult(expectedLabelsResults.String(), expectedLabelsResults.Hash(), lbs, structuredMetadata, labels.EmptyLabels()), lbr) + require.Equal(t, expectedLabelsResults.Hash(), lbr.Hash()) + require.Equal(t, expectedLabelsResults.String(), lbr.String()) require.Equal(t, true, matches) ls, lbr, matches = pipeline.ForStream(lbs).ProcessString(0, "", structuredMetadata...) 
require.Equal(t, "", ls) - require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr) + require.Equal(t, NewLabelsResult(expectedLabelsResults.String(), expectedLabelsResults.Hash(), lbs, structuredMetadata, labels.EmptyLabels()), lbr) + require.Equal(t, expectedLabelsResults.Hash(), lbr.Hash()) + require.Equal(t, expectedLabelsResults.String(), lbr.String()) require.Equal(t, true, matches) // test duplicated structured metadata with stream labels - expectedLabelsResults = append(lbs, labels.Label{ - Name: "foo_extracted", Value: "baz", - }) - expectedLabelsResults = append(expectedLabelsResults, structuredMetadata...) + expectedNonIndexedLabels := labels.FromStrings("foo_extracted", "baz", "y", "1", "z", "2") + expectedLabelsResults = labels.FromStrings("foo", "bar", "foo_extracted", "baz", "y", "1", "z", "2") l, lbr, matches = pipeline.ForStream(lbs).Process(0, []byte(""), append(structuredMetadata, labels.Label{ Name: "foo", Value: "baz", })...) require.Equal(t, []byte(""), l) - require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr) + require.Equal(t, NewLabelsResult(expectedLabelsResults.String(), expectedLabelsResults.Hash(), lbs, expectedNonIndexedLabels, labels.EmptyLabels()), lbr) + require.Equal(t, expectedLabelsResults.Hash(), lbr.Hash()) + require.Equal(t, expectedLabelsResults.String(), lbr.String()) require.Equal(t, true, matches) pipeline.Reset() @@ -64,12 +69,16 @@ func TestPipeline(t *testing.T) { l, lbr, matches := p.ForStream(lbs).Process(0, []byte("line")) require.Equal(t, []byte("lbs bar"), l) - require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) + require.Equal(t, NewLabelsResult(lbs.String(), lbs.Hash(), lbs, labels.EmptyLabels(), labels.EmptyLabels()), lbr) + require.Equal(t, lbs.Hash(), lbr.Hash()) + require.Equal(t, lbs.String(), lbr.String()) require.Equal(t, true, matches) ls, lbr, matches := p.ForStream(lbs).ProcessString(0, "line") require.Equal(t, "lbs bar", ls) - require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr) + require.Equal(t, NewLabelsResult(lbs.String(), lbs.Hash(), lbs, labels.EmptyLabels(), labels.EmptyLabels()), lbr) + require.Equal(t, lbs.Hash(), lbr.Hash()) + require.Equal(t, lbs.String(), lbr.String()) require.Equal(t, true, matches) l, lbr, matches = p.ForStream(labels.EmptyLabels()).Process(0, []byte("line")) @@ -84,12 +93,16 @@ func TestPipeline(t *testing.T) { // Reset caches p.baseBuilder.del = []string{"foo", "bar"} - p.baseBuilder.add = labels.FromStrings("baz", "blip") + p.baseBuilder.add = [numValidCategories]labels.Labels{ + ParsedLabel: labels.FromStrings("baz", "blip"), + } p.Reset() require.Len(t, p.streamPipelines, 0) require.Len(t, p.baseBuilder.del, 0) - require.Len(t, p.baseBuilder.add, 0) + for _, v := range p.baseBuilder.add { + require.Len(t, v, 0) + } } func TestPipelineWithStructuredMetadata(t *testing.T) { @@ -104,31 +117,38 @@ func TestPipelineWithStructuredMetadata(t *testing.T) { l, lbr, matches := p.ForStream(lbs).Process(0, []byte("line"), structuredMetadata...) 
require.Equal(t, []byte("lbs bar bob"), l) - require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr) + require.Equal(t, NewLabelsResult(expectedLabelsResults.String(), expectedLabelsResults.Hash(), lbs, structuredMetadata, labels.EmptyLabels()), lbr) + require.Equal(t, expectedLabelsResults.Hash(), lbr.Hash()) + require.Equal(t, expectedLabelsResults.String(), lbr.String()) require.Equal(t, true, matches) ls, lbr, matches := p.ForStream(lbs).ProcessString(0, "line", structuredMetadata...) require.Equal(t, "lbs bar bob", ls) - require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr) + require.Equal(t, NewLabelsResult(expectedLabelsResults.String(), expectedLabelsResults.Hash(), lbs, structuredMetadata, labels.EmptyLabels()), lbr) + require.Equal(t, expectedLabelsResults.Hash(), lbr.Hash()) + require.Equal(t, expectedLabelsResults.String(), lbr.String()) require.Equal(t, true, matches) // test duplicated structured metadata with stream labels - expectedLabelsResults = append(lbs, labels.Label{ - Name: "foo_extracted", Value: "baz", - }) + expectedNonIndexedLabels := labels.FromStrings("user", "bob", "foo_extracted", "baz") + expectedLabelsResults = labels.FromStrings("foo", "bar", "foo_extracted", "baz") expectedLabelsResults = append(expectedLabelsResults, structuredMetadata...) l, lbr, matches = p.ForStream(lbs).Process(0, []byte("line"), append(structuredMetadata, labels.Label{ Name: "foo", Value: "baz", })...) require.Equal(t, []byte("lbs bar bob"), l) - require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr) + require.Equal(t, NewLabelsResult(expectedLabelsResults.String(), expectedLabelsResults.Hash(), lbs, expectedNonIndexedLabels, labels.EmptyLabels()), lbr) + require.Equal(t, expectedLabelsResults.Hash(), lbr.Hash()) + require.Equal(t, expectedLabelsResults.String(), lbr.String()) require.Equal(t, true, matches) ls, lbr, matches = p.ForStream(lbs).ProcessString(0, "line", append(structuredMetadata, labels.Label{ Name: "foo", Value: "baz", })...) 
require.Equal(t, "lbs bar bob", ls) - require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr) + require.Equal(t, NewLabelsResult(expectedLabelsResults.String(), expectedLabelsResults.Hash(), lbs, expectedNonIndexedLabels, labels.EmptyLabels()), lbr) + require.Equal(t, expectedLabelsResults.Hash(), lbr.Hash()) + require.Equal(t, expectedLabelsResults.String(), lbr.String()) require.Equal(t, true, matches) l, lbr, matches = p.ForStream(lbs).Process(0, []byte("line")) @@ -153,12 +173,16 @@ func TestPipelineWithStructuredMetadata(t *testing.T) { // Reset caches p.baseBuilder.del = []string{"foo", "bar"} - p.baseBuilder.add = labels.FromStrings("baz", "blip") + p.baseBuilder.add = [numValidCategories]labels.Labels{ + ParsedLabel: labels.FromStrings("baz", "blip"), + } p.Reset() require.Len(t, p.streamPipelines, 0) require.Len(t, p.baseBuilder.del, 0) - require.Len(t, p.baseBuilder.add, 0) + for _, v := range p.baseBuilder.add { + require.Len(t, v, 0) + } } func TestFilteringPipeline(t *testing.T) { @@ -358,6 +382,10 @@ func TestDropLabelsPipeline(t *testing.T) { for i, line := range tt.lines { _, finalLbs, _ := sp.Process(0, line) require.Equal(t, tt.wantLabels[i], finalLbs.Labels()) + require.Nil(t, finalLbs.Stream()) + require.Nil(t, finalLbs.StructuredMetadata()) + require.Equal(t, tt.wantLabels[i], finalLbs.Parsed()) + require.Equal(t, tt.wantLabels[i].Hash(), finalLbs.Hash()) } } @@ -436,7 +464,7 @@ func TestKeepLabelsPipeline(t *testing.T) { labels.FromStrings( "level", "debug", ), - {}, + labels.EmptyLabels(), }, }, { @@ -464,8 +492,8 @@ func TestKeepLabelsPipeline(t *testing.T) { labels.FromStrings( "level", "info", ), - {}, - {}, + labels.EmptyLabels(), + labels.EmptyLabels(), }, }, } { @@ -476,6 +504,15 @@ func TestKeepLabelsPipeline(t *testing.T) { finalLine, finalLbs, _ := sp.Process(0, line) require.Equal(t, tt.wantLine[i], finalLine) require.Equal(t, tt.wantLabels[i], finalLbs.Labels()) + require.Nil(t, finalLbs.Stream()) + require.Nil(t, finalLbs.StructuredMetadata()) + if len(tt.wantLabels[i]) > 0 { + require.Equal(t, tt.wantLabels[i], finalLbs.Parsed()) + } else { + require.Nil(t, finalLbs.Parsed()) + } + require.Equal(t, tt.wantLabels[i].Hash(), finalLbs.Hash()) + require.Equal(t, tt.wantLabels[i].String(), finalLbs.String()) } }) } diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index bca82aaa5554a..6913752166d04 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -387,6 +387,7 @@ func (t *Loki) initQuerier() (services.Service, error) { toMerge := []middleware.Interface{ httpreq.ExtractQueryMetricsMiddleware(), httpreq.ExtractQueryTagsMiddleware(), + httpreq.PropagateHeadersMiddleware(httpreq.LokiEncodingFlagsHeader), serverutil.RecoveryHTTPMiddleware, t.HTTPAuthMiddleware, serverutil.NewPrepopulateMiddleware(), @@ -898,7 +899,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { toMerge := []middleware.Interface{ httpreq.ExtractQueryTagsMiddleware(), - httpreq.PropagateHeadersMiddleware(httpreq.LokiActorPathHeader), + httpreq.PropagateHeadersMiddleware(httpreq.LokiActorPathHeader, httpreq.LokiEncodingFlagsHeader), serverutil.RecoveryHTTPMiddleware, t.HTTPAuthMiddleware, queryrange.StatsHTTPMiddleware, @@ -1402,7 +1403,7 @@ func (t *Loki) initBloomCompactorRing() (services.Service, error) { t.Cfg.BloomCompactor.Ring.ListenPort = t.Cfg.Server.GRPCListenPort // is LegacyMode needed? 
- //legacyReadMode := t.Cfg.LegacyReadTarget && t.isModuleActive(Read) + // legacyReadMode := t.Cfg.LegacyReadTarget && t.isModuleActive(Read) rm, err := lokiring.NewRingManager(bloomCompactorRingKey, lokiring.ServerMode, t.Cfg.BloomCompactor.Ring, 1, 1, util_log.Logger, prometheus.DefaultRegisterer) diff --git a/pkg/push/push.pb.go b/pkg/push/push.pb.go index 441c4409f311b..3b07d850ff162 100644 --- a/pkg/push/push.pb.go +++ b/pkg/push/push.pb.go @@ -219,6 +219,10 @@ type EntryAdapter struct { Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"` Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"` StructuredMetadata []LabelPairAdapter `protobuf:"bytes,3,rep,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"` + // This field shouldn't be used by clients to push data to Loki. + // It is only used by Loki to return parsed log lines in query responses. + // TODO: Remove this field from the write path Proto. + Parsed []LabelPairAdapter `protobuf:"bytes,4,rep,name=parsed,proto3" json:"parsed,omitempty"` } func (m *EntryAdapter) Reset() { *m = EntryAdapter{} } @@ -274,6 +278,13 @@ func (m *EntryAdapter) GetStructuredMetadata() []LabelPairAdapter { return nil } +func (m *EntryAdapter) GetParsed() []LabelPairAdapter { + if m != nil { + return m.Parsed + } + return nil +} + func init() { proto.RegisterType((*PushRequest)(nil), "logproto.PushRequest") proto.RegisterType((*PushResponse)(nil), "logproto.PushResponse") @@ -285,39 +296,40 @@ func init() { func init() { proto.RegisterFile("pkg/push/push.proto", fileDescriptor_35ec442956852c9e) } var fileDescriptor_35ec442956852c9e = []byte{ - // 503 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xf6, 0x25, 0x69, 0xda, 0x5e, 0x4a, 0x41, 0x47, 0x5b, 0x8c, 0x55, 0x9d, 0x23, 0x8b, 0x21, - 0x03, 0xd8, 0x52, 0x18, 0x58, 0x58, 0x62, 0x09, 0xa9, 0x03, 0x48, 0x95, 0x41, 0x20, 0xb1, 0x5d, - 0x9a, 0xab, 0x6d, 0xd5, 0xf6, 0x99, 0xbb, 0x33, 0x52, 0x37, 0x7e, 0x42, 0xf9, 0x17, 0xfc, 0x94, - 0x8e, 0x19, 0x2b, 0x06, 0x43, 0x9c, 0xa5, 0xca, 0xd4, 0x9f, 0x80, 0x7c, 0xf6, 0x91, 0x52, 0xba, - 0x9c, 0xbf, 0xf7, 0xdd, 0x7b, 0xef, 0xfb, 0xfc, 0x9e, 0x0d, 0x1f, 0xe7, 0x67, 0xa1, 0x97, 0x17, - 0x22, 0x52, 0x87, 0x9b, 0x73, 0x26, 0x19, 0xda, 0x4a, 0x58, 0xa8, 0x90, 0xb5, 0x17, 0xb2, 0x90, - 0x29, 0xe8, 0xd5, 0xa8, 0xb9, 0xb7, 0xec, 0x90, 0xb1, 0x30, 0xa1, 0x9e, 0x8a, 0xa6, 0xc5, 0xa9, - 0x27, 0xe3, 0x94, 0x0a, 0x49, 0xd2, 0xbc, 0x49, 0x70, 0x3e, 0xc1, 0xc1, 0x71, 0x21, 0xa2, 0x80, - 0x7e, 0x29, 0xa8, 0x90, 0xe8, 0x08, 0x6e, 0x0a, 0xc9, 0x29, 0x49, 0x85, 0x09, 0x86, 0xdd, 0xd1, - 0x60, 0xfc, 0xc4, 0xd5, 0x0a, 0xee, 0x7b, 0x75, 0x31, 0x99, 0x91, 0x5c, 0x52, 0xee, 0xef, 0xff, - 0x2c, 0xed, 0x7e, 0x43, 0xad, 0x4a, 0x5b, 0x57, 0x05, 0x1a, 0x38, 0xbb, 0x70, 0xa7, 0x69, 0x2c, - 0x72, 0x96, 0x09, 0xea, 0x7c, 0x07, 0xf0, 0xc1, 0x3f, 0x1d, 0x90, 0x03, 0xfb, 0x09, 0x99, 0xd2, - 0xa4, 0x96, 0x02, 0xa3, 0x6d, 0x1f, 0xae, 0x4a, 0xbb, 0x65, 0x82, 0xf6, 0x89, 0x26, 0x70, 0x93, - 0x66, 0x92, 0xc7, 0x54, 0x98, 0x1d, 0xe5, 0xe7, 0x60, 0xed, 0xe7, 0x4d, 0x26, 0xf9, 0xb9, 0xb6, - 0xf3, 0xf0, 0xb2, 0xb4, 0x8d, 0xda, 0x48, 0x9b, 0x1e, 0x68, 0x80, 0x9e, 0xc2, 0x5e, 0x44, 0x44, - 0x64, 0x76, 0x87, 0x60, 0xd4, 0xf3, 0x37, 0x56, 0xa5, 0x0d, 0x5e, 0x04, 0x8a, 0x72, 0x5e, 0xc3, - 0x47, 0x6f, 0x6b, 0x9d, 0x63, 0x12, 0x73, 0xed, 0x0a, 0xc1, 0x5e, 0x46, 0x52, 0xda, 0x78, 0x0a, - 0x14, 0x46, 0x7b, 0x70, 0xe3, 0x2b, 0x49, 0x0a, 0x6a, 0x76, 0x14, 0xd9, 
0x04, 0xce, 0x35, 0x80, - 0x3b, 0xb7, 0x3d, 0xa0, 0x23, 0xb8, 0xfd, 0x77, 0xbc, 0xaa, 0x7e, 0x30, 0xb6, 0xdc, 0x66, 0x01, - 0xae, 0x5e, 0x80, 0xfb, 0x41, 0x67, 0xf8, 0xbb, 0xad, 0xe5, 0x8e, 0x14, 0x17, 0xbf, 0x6c, 0x10, - 0xac, 0x8b, 0xd1, 0x21, 0xec, 0x25, 0x71, 0xd6, 0xea, 0xf9, 0x5b, 0xab, 0xd2, 0x56, 0x71, 0xa0, - 0x4e, 0x94, 0x43, 0x24, 0x24, 0x2f, 0x4e, 0x64, 0xc1, 0xe9, 0xec, 0x1d, 0x95, 0x64, 0x46, 0x24, - 0x31, 0xbb, 0x6a, 0x3e, 0xd6, 0x7a, 0x3e, 0x77, 0x5f, 0xcd, 0x7f, 0xd6, 0x0a, 0x1e, 0xfe, 0x5f, - 0xfd, 0x9c, 0xa5, 0xb1, 0xa4, 0x69, 0x2e, 0xcf, 0x83, 0x7b, 0x7a, 0x8f, 0x27, 0xb0, 0x5f, 0x2f, - 0x93, 0x72, 0xf4, 0x0a, 0xf6, 0x6a, 0x84, 0xf6, 0xd7, 0x3a, 0xb7, 0xbe, 0x1f, 0xeb, 0xe0, 0x2e, - 0xdd, 0x6e, 0xdf, 0xf0, 0x3f, 0xce, 0x17, 0xd8, 0xb8, 0x5a, 0x60, 0xe3, 0x66, 0x81, 0xc1, 0xb7, - 0x0a, 0x83, 0x1f, 0x15, 0x06, 0x97, 0x15, 0x06, 0xf3, 0x0a, 0x83, 0xdf, 0x15, 0x06, 0xd7, 0x15, - 0x36, 0x6e, 0x2a, 0x0c, 0x2e, 0x96, 0xd8, 0x98, 0x2f, 0xb1, 0x71, 0xb5, 0xc4, 0xc6, 0xe7, 0x61, - 0x18, 0xcb, 0xa8, 0x98, 0xba, 0x27, 0x2c, 0xf5, 0x42, 0x4e, 0x4e, 0x49, 0x46, 0xbc, 0x84, 0x9d, - 0xc5, 0x9e, 0xfe, 0x19, 0xa6, 0x7d, 0xa5, 0xf6, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, - 0x46, 0x64, 0x71, 0x1f, 0x03, 0x00, 0x00, + // 527 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xf5, 0x26, 0x6e, 0xda, 0x6e, 0x4a, 0xa9, 0x96, 0xb6, 0x18, 0xab, 0x5a, 0x47, 0x16, 0x87, + 0x1c, 0xc0, 0x96, 0xc2, 0x81, 0x0b, 0x97, 0x58, 0x42, 0xea, 0xa1, 0x48, 0x95, 0x41, 0x20, 0x71, + 0xdb, 0x34, 0x5b, 0xdb, 0xaa, 0xed, 0x35, 0xbb, 0x6b, 0xa4, 0xde, 0xf8, 0x84, 0xf2, 0x17, 0x7c, + 0x01, 0xdf, 0xd0, 0x63, 0x8e, 0x15, 0x07, 0x43, 0x9c, 0x0b, 0xca, 0xa9, 0x9f, 0x80, 0xbc, 0xb6, + 0x49, 0x28, 0x48, 0x5c, 0x36, 0x6f, 0x66, 0x67, 0xde, 0x7b, 0x99, 0x1d, 0xc3, 0x07, 0xd9, 0x45, + 0xe0, 0x66, 0xb9, 0x08, 0xd5, 0xe1, 0x64, 0x9c, 0x49, 0x86, 0xb6, 0x62, 0x16, 0x28, 0x64, 0xee, + 0x07, 0x2c, 0x60, 0x0a, 0xba, 0x15, 0xaa, 0xef, 0x4d, 0x2b, 0x60, 0x2c, 0x88, 0xa9, 0xab, 0xa2, + 0x49, 0x7e, 0xee, 0xca, 0x28, 0xa1, 0x42, 0x92, 0x24, 0xab, 0x0b, 0xec, 0x77, 0xb0, 0x7f, 0x9a, + 0x8b, 0xd0, 0xa7, 0x1f, 0x72, 0x2a, 0x24, 0x3a, 0x86, 0x9b, 0x42, 0x72, 0x4a, 0x12, 0x61, 0x80, + 0x41, 0x77, 0xd8, 0x1f, 0x3d, 0x74, 0x5a, 0x05, 0xe7, 0xb5, 0xba, 0x18, 0x4f, 0x49, 0x26, 0x29, + 0xf7, 0x0e, 0xbe, 0x15, 0x56, 0xaf, 0x4e, 0x2d, 0x0b, 0xab, 0xed, 0xf2, 0x5b, 0x60, 0xef, 0xc2, + 0x9d, 0x9a, 0x58, 0x64, 0x2c, 0x15, 0xd4, 0xfe, 0x0c, 0xe0, 0xbd, 0x3f, 0x18, 0x90, 0x0d, 0x7b, + 0x31, 0x99, 0xd0, 0xb8, 0x92, 0x02, 0xc3, 0x6d, 0x0f, 0x2e, 0x0b, 0xab, 0xc9, 0xf8, 0xcd, 0x2f, + 0x1a, 0xc3, 0x4d, 0x9a, 0x4a, 0x1e, 0x51, 0x61, 0x74, 0x94, 0x9f, 0xc3, 0x95, 0x9f, 0x97, 0xa9, + 0xe4, 0x97, 0xad, 0x9d, 0xfb, 0xd7, 0x85, 0xa5, 0x55, 0x46, 0x9a, 0x72, 0xbf, 0x05, 0xe8, 0x11, + 0xd4, 0x43, 0x22, 0x42, 0xa3, 0x3b, 0x00, 0x43, 0xdd, 0xdb, 0x58, 0x16, 0x16, 0x78, 0xea, 0xab, + 0x94, 0xfd, 0x02, 0xee, 0x9d, 0x54, 0x3a, 0xa7, 0x24, 0xe2, 0xad, 0x2b, 0x04, 0xf5, 0x94, 0x24, + 0xb4, 0xf6, 0xe4, 0x2b, 0x8c, 0xf6, 0xe1, 0xc6, 0x47, 0x12, 0xe7, 0xd4, 0xe8, 0xa8, 0x64, 0x1d, + 0xd8, 0x5f, 0x3b, 0x70, 0x67, 0xdd, 0x03, 0x3a, 0x86, 0xdb, 0xbf, 0xc7, 0xab, 0xfa, 0xfb, 0x23, + 0xd3, 0xa9, 0x1f, 0xc0, 0x69, 0x1f, 0xc0, 0x79, 0xd3, 0x56, 0x78, 0xbb, 0x8d, 0xe5, 0x8e, 0x14, + 0x57, 0xdf, 0x2d, 0xe0, 0xaf, 0x9a, 0xd1, 0x11, 0xd4, 0xe3, 0x28, 0x6d, 0xf4, 0xbc, 0xad, 0x65, + 0x61, 0xa9, 0xd8, 0x57, 0x27, 0xca, 0x20, 0x12, 0x92, 0xe7, 0x67, 0x32, 0xe7, 0x74, 0xfa, 0x8a, + 0x4a, 
0x32, 0x25, 0x92, 0x18, 0x5d, 0x35, 0x1f, 0x73, 0x35, 0x9f, 0xbb, 0x7f, 0xcd, 0x7b, 0xdc, + 0x08, 0x1e, 0xfd, 0xdd, 0xfd, 0x84, 0x25, 0x91, 0xa4, 0x49, 0x26, 0x2f, 0xfd, 0x7f, 0x70, 0xa3, + 0x13, 0xd8, 0xcb, 0x08, 0x17, 0x74, 0x6a, 0xe8, 0xff, 0x55, 0x31, 0x1a, 0x95, 0xbd, 0xba, 0x63, + 0x8d, 0xb9, 0xe1, 0x18, 0x8d, 0x61, 0xaf, 0x5a, 0x0d, 0xca, 0xd1, 0x73, 0xa8, 0x57, 0x08, 0x1d, + 0xac, 0xf8, 0xd6, 0xb6, 0xd1, 0x3c, 0xbc, 0x9b, 0x6e, 0x76, 0x49, 0xf3, 0xde, 0xce, 0xe6, 0x58, + 0xbb, 0x99, 0x63, 0xed, 0x76, 0x8e, 0xc1, 0xa7, 0x12, 0x83, 0x2f, 0x25, 0x06, 0xd7, 0x25, 0x06, + 0xb3, 0x12, 0x83, 0x1f, 0x25, 0x06, 0x3f, 0x4b, 0xac, 0xdd, 0x96, 0x18, 0x5c, 0x2d, 0xb0, 0x36, + 0x5b, 0x60, 0xed, 0x66, 0x81, 0xb5, 0xf7, 0x83, 0x20, 0x92, 0x61, 0x3e, 0x71, 0xce, 0x58, 0xe2, + 0x06, 0x9c, 0x9c, 0x93, 0x94, 0xb8, 0x31, 0xbb, 0x88, 0xdc, 0xf6, 0xd3, 0x9a, 0xf4, 0x94, 0xda, + 0xb3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xaa, 0x57, 0xd3, 0x6d, 0x03, 0x00, 0x00, } func (this *PushRequest) Equal(that interface{}) bool { @@ -465,6 +477,14 @@ func (this *EntryAdapter) Equal(that interface{}) bool { return false } } + if len(this.Parsed) != len(that1.Parsed) { + return false + } + for i := range this.Parsed { + if !this.Parsed[i].Equal(&that1.Parsed[i]) { + return false + } + } return true } func (this *PushRequest) GoString() string { @@ -519,7 +539,7 @@ func (this *EntryAdapter) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&push.EntryAdapter{") s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") s = append(s, "Line: "+fmt.Sprintf("%#v", this.Line)+",\n") @@ -530,6 +550,13 @@ func (this *EntryAdapter) GoString() string { } s = append(s, "StructuredMetadata: "+fmt.Sprintf("%#v", vs)+",\n") } + if this.Parsed != nil { + vs := make([]*LabelPairAdapter, len(this.Parsed)) + for i := range vs { + vs[i] = &this.Parsed[i] + } + s = append(s, "Parsed: "+fmt.Sprintf("%#v", vs)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -788,6 +815,20 @@ func (m *EntryAdapter) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Parsed) > 0 { + for iNdEx := len(m.Parsed) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parsed[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPush(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.StructuredMetadata) > 0 { for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- { { @@ -912,6 +953,12 @@ func (m *EntryAdapter) Size() (n int) { n += 1 + l + sovPush(uint64(l)) } } + if len(m.Parsed) > 0 { + for _, e := range m.Parsed { + l = e.Size() + n += 1 + l + sovPush(uint64(l)) + } + } return n } @@ -977,10 +1024,16 @@ func (this *EntryAdapter) String() string { repeatedStringForStructuredMetadata += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + "," } repeatedStringForStructuredMetadata += "}" + repeatedStringForParsed := "[]LabelPairAdapter{" + for _, f := range this.Parsed { + repeatedStringForParsed += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + "," + } + repeatedStringForParsed += "}" s := strings.Join([]string{`&EntryAdapter{`, `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Line:` + fmt.Sprintf("%v", this.Line) + `,`, `StructuredMetadata:` + 
repeatedStringForStructuredMetadata + `,`,
+		`Parsed:` + repeatedStringForParsed + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1516,6 +1569,40 @@ func (m *EntryAdapter) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parsed", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPush
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthPush
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthPush
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Parsed = append(m.Parsed, LabelPairAdapter{})
+			if err := m.Parsed[len(m.Parsed)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipPush(dAtA[iNdEx:])
diff --git a/pkg/push/push.proto b/pkg/push/push.proto
index 08552a54d3ae5..3bf8ad06a8a83 100644
--- a/pkg/push/push.proto
+++ b/pkg/push/push.proto
@@ -46,4 +46,11 @@ message EntryAdapter {
     (gogoproto.nullable) = false,
     (gogoproto.jsontag) = "structuredMetadata,omitempty"
   ];
+  // This field shouldn't be used by clients to push data to Loki.
+  // It is only used by Loki to return parsed log lines in query responses.
+  // TODO: Remove this field from the write path Proto.
+  repeated LabelPairAdapter parsed = 4 [
+    (gogoproto.nullable) = false,
+    (gogoproto.jsontag) = "parsed,omitempty"
+  ];
 }
diff --git a/pkg/push/types.go b/pkg/push/types.go
index ba37d074f776b..d0fc6d6cb8358 100644
--- a/pkg/push/types.go
+++ b/pkg/push/types.go
@@ -25,12 +25,38 @@ type Entry struct {
 	Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
 	Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
 	StructuredMetadata LabelsAdapter `protobuf:"bytes,3,opt,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"`
+	Parsed LabelsAdapter `protobuf:"bytes,4,opt,name=parsed,proto3" json:"parsed,omitempty"`
+}
+
+// MarshalJSON implements json.Marshaler.
+// In Loki, this method should only be used by the
+// Legacy encoder used when hitting the deprecated /api/prom/query endpoint.
+// We will ignore the categorized labels and only return the stream labels.
+func (m *Stream) MarshalJSON() ([]byte, error) {
+	return json.Marshal(struct {
+		Labels  string  `json:"labels"`
+		Entries []Entry `json:"entries"`
+	}{
+		Labels:  m.Labels,
+		Entries: m.Entries,
+	})
+}
+
+// MarshalJSON implements json.Marshaler.
+// In Loki, this method should only be used by the
+// Legacy encoder used when hitting the deprecated /api/prom/query endpoint.
+// We will ignore the structured metadata.
+func (m *Entry) MarshalJSON() ([]byte, error) {
+	type raw Entry
+	e := raw(*m)
+	e.StructuredMetadata = nil
+	return json.Marshal(e)
 }
 
 // LabelAdapter should be a copy of the Prometheus labels.Label type.
 // We cannot import Prometheus in this package because it would create many dependencies
 // in other projects importing this package. Instead, we copy the definition here, which should
-// be kept in sync with the original so it can be casted to the prometheus type.
+// be kept in sync with the original, so it can be cast to the prometheus type.
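For illustration, a minimal sketch of what the legacy marshaler above emits. It assumes a test file inside this same package with encoding/json, fmt, and time imported; the Example name and values are invented, but the expected output line matches the legacy fixtures later in this patch:

	func ExampleEntry_legacyJSON() {
		e := Entry{
			Timestamp:          time.Unix(0, 123456789012346).UTC(),
			Line:               "super line with labels",
			StructuredMetadata: LabelsAdapter{{Name: "foo", Value: "a"}},
		}
		b, _ := json.Marshal(&e) // dispatches to Entry.MarshalJSON above
		fmt.Println(string(b))   // structured metadata is dropped on the legacy path
		// Output: {"ts":"1970-01-02T10:17:36.789012346Z","line":"super line with labels"}
	}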
type LabelAdapter struct { Name, Value string } @@ -172,6 +198,20 @@ func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Parsed) > 0 { + for iNdEx := len(m.Parsed) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parsed[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPush(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.StructuredMetadata) > 0 { for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- { { @@ -471,6 +511,40 @@ func (m *Entry) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parsed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPush + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPush + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPush + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parsed = append(m.Parsed, LabelAdapter{}) + if err := m.Parsed[len(m.Parsed)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPush(dAtA[iNdEx:]) @@ -661,6 +735,12 @@ func (m *Entry) Size() (n int) { n += 1 + l + sovPush(uint64(l)) } } + if len(m.Parsed) > 0 { + for _, e := range m.Parsed { + l = e.Size() + n += 1 + l + sovPush(uint64(l)) + } + } return n } @@ -711,7 +791,10 @@ func (m *Stream) Equal(that interface{}) bool { return false } } - return m.Hash == that1.Hash + if m.Hash != that1.Hash { + return false + } + return true } func (m *Entry) Equal(that interface{}) bool { @@ -739,11 +822,22 @@ func (m *Entry) Equal(that interface{}) bool { if m.Line != that1.Line { return false } + if len(m.StructuredMetadata) != len(that1.StructuredMetadata) { + return false + } for i := range m.StructuredMetadata { if !m.StructuredMetadata[i].Equal(that1.StructuredMetadata[i]) { return false } } + if len(m.Parsed) != len(that1.Parsed) { + return false + } + for i := range m.Parsed { + if !m.Parsed[i].Equal(that1.Parsed[i]) { + return false + } + } return true } diff --git a/pkg/push/types_test.go b/pkg/push/types_test.go index 709f756e37942..9cf89b9288374 100644 --- a/pkg/push/types_test.go +++ b/pkg/push/types_test.go @@ -14,20 +14,20 @@ var ( Labels: `{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}`, Hash: 1234*10 ^ 9, Entries: []Entry{ - {now, line, nil}, - {now.Add(1 * time.Second), line, LabelsAdapter{{Name: "traceID", Value: "1234"}}}, - {now.Add(2 * time.Second), line, nil}, - {now.Add(3 * time.Second), line, LabelsAdapter{{Name: "user", Value: "abc"}}}, + {now, line, nil, nil}, + {now.Add(1 * time.Second), line, LabelsAdapter{{Name: "traceID", Value: "1234"}}, nil}, + {now.Add(2 * time.Second), line, nil, nil}, + {now.Add(3 * time.Second), line, LabelsAdapter{{Name: "user", Value: "abc"}}, LabelsAdapter{{Name: "msg", Value: "text"}}}, }, } streamAdapter = StreamAdapter{ Labels: `{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}`, Hash: 1234*10 ^ 9, Entries: []EntryAdapter{ - {now, line, nil}, - {now.Add(1 * time.Second), line, []LabelPairAdapter{{Name: "traceID", Value: "1234"}}}, - {now.Add(2 * time.Second), line, nil}, - {now.Add(3 * time.Second), line, 
[]LabelPairAdapter{{Name: "user", Value: "abc"}}},
+			{now, line, nil, nil},
+			{now.Add(1 * time.Second), line, []LabelPairAdapter{{Name: "traceID", Value: "1234"}}, nil},
+			{now.Add(2 * time.Second), line, nil, nil},
+			{now.Add(3 * time.Second), line, []LabelPairAdapter{{Name: "user", Value: "abc"}}, []LabelPairAdapter{{Name: "msg", Value: "text"}}},
 		},
 	}
 )
diff --git a/pkg/querier/http.go b/pkg/querier/http.go
index cc2343dd2437b..61b2f640b44b7 100644
--- a/pkg/querier/http.go
+++ b/pkg/querier/http.go
@@ -289,7 +289,7 @@ func (q *QuerierAPI) IndexStatsHandler(ctx context.Context, req *loghttp.RangeQu
 	return resp, err
 }
 
-//TODO(trevorwhitney): add test for the handler split
+// TODO(trevorwhitney): add test for the handler split
 
 // VolumeHandler queries the index label volumes related to the passed matchers and given time range.
 // Returns either N values where N is the time range / step and a single value for a time range depending on the request.
diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go
index 5b9611a4f38c5..03559b2e2f2ef 100644
--- a/pkg/querier/queryrange/codec.go
+++ b/pkg/querier/queryrange/codec.go
@@ -378,6 +378,14 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest)
 		}
 	}
 
+	// If there are no encoding flags in the context, try the HTTP request.
+	if encFlags := httpreq.ExtractEncodingFlagsFromCtx(ctx); encFlags == nil {
+		encFlags = httpreq.ExtractEncodingFlagsFromProto(r)
+		if encFlags != nil {
+			ctx = httpreq.AddEncodingFlagsToContext(ctx, encFlags)
+		}
+	}
+
 	if err := httpReq.ParseForm(); err != nil {
 		return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
 	}
@@ -500,7 +508,9 @@ func (Codec) EncodeHTTPGrpcResponse(ctx context.Context, req *httpgrpc.HTTPReque
 	version := loghttp.GetVersion(req.Url)
 	var buf bytes.Buffer
 
-	err := encodeResponseJSONTo(version, res, &buf)
+	encodingFlags := httpreq.ExtractEncodingFlagsFromProto(req)
+
+	err := encodeResponseJSONTo(version, res, &buf, encodingFlags)
 	if err != nil {
 		return nil, err
 	}
@@ -521,6 +531,11 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht
 		header.Set(string(httpreq.QueryTagsHTTPHeader), queryTags)
 	}
 
+	encodingFlags := httpreq.ExtractHeader(ctx, httpreq.LokiEncodingFlagsHeader)
+	if encodingFlags != "" {
+		header.Set(httpreq.LokiEncodingFlagsHeader, encodingFlags)
+	}
+
 	actor := httpreq.ExtractHeader(ctx, httpreq.LokiActorPathHeader)
 	if actor != "" {
 		header.Set(httpreq.LokiActorPathHeader, actor)
@@ -912,15 +927,16 @@ func (Codec) EncodeResponse(ctx context.Context, req *http.Request, res queryran
 	// Default to JSON.
version := loghttp.GetVersion(req.RequestURI) - return encodeResponseJSON(ctx, version, res) + encodingFlags := httpreq.ExtractEncodingFlags(req) + return encodeResponseJSON(ctx, version, res, encodingFlags) } -func encodeResponseJSON(ctx context.Context, version loghttp.Version, res queryrangebase.Response) (*http.Response, error) { +func encodeResponseJSON(ctx context.Context, version loghttp.Version, res queryrangebase.Response, encodeFlags httpreq.EncodingFlags) (*http.Response, error) { sp, _ := opentracing.StartSpanFromContext(ctx, "codec.EncodeResponse") defer sp.Finish() var buf bytes.Buffer - err := encodeResponseJSONTo(version, res, &buf) + err := encodeResponseJSONTo(version, res, &buf, encodeFlags) if err != nil { return nil, err } @@ -937,7 +953,7 @@ func encodeResponseJSON(ctx context.Context, version loghttp.Version, res queryr return &resp, nil } -func encodeResponseJSONTo(version loghttp.Version, res queryrangebase.Response, w io.Writer) error { +func encodeResponseJSONTo(version loghttp.Version, res queryrangebase.Response, w io.Writer, encodeFlags httpreq.EncodingFlags) error { switch response := res.(type) { case *LokiPromResponse: return response.encodeTo(w) @@ -959,7 +975,7 @@ func encodeResponseJSONTo(version loghttp.Version, res queryrangebase.Response, return err } } else { - if err := marshal.WriteQueryResponseJSON(logqlmodel.Streams(streams), response.Statistics, w); err != nil { + if err := marshal.WriteQueryResponseJSON(logqlmodel.Streams(streams), response.Statistics, w, encodeFlags); err != nil { return err } } diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index 2131d34eab67a..70814ff83d091 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -28,6 +28,7 @@ import ( "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/util" + "github.com/grafana/loki/pkg/util/httpreq" ) func init() { @@ -271,6 +272,36 @@ func Test_codec_DecodeResponse(t *testing.T) { Statistics: statsResult, }, false, }, + { + "streams v1 with structured metadata", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsStringWithStructuredMetdata))}, + &LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/loki/api/v1/query_range"}, + &LokiResponse{ + Status: loghttp.QueryStatusSuccess, + Direction: logproto.FORWARD, + Limit: 100, + Version: uint32(loghttp.VersionV1), + Data: LokiData{ + ResultType: loghttp.ResultTypeStream, + Result: logStreamsWithStructuredMetadata, + }, + Statistics: statsResult, + }, false, + }, + { + "streams v1 with categorized labels", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsStringWithCategories))}, + &LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/loki/api/v1/query_range"}, + &LokiResponse{ + Status: loghttp.QueryStatusSuccess, + Direction: logproto.FORWARD, + Limit: 100, + Version: uint32(loghttp.VersionV1), + Data: LokiData{ + ResultType: loghttp.ResultTypeStream, + Result: logStreamsWithCategories, + }, + Statistics: statsResult, + }, false, + }, { "streams legacy", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsString))}, &LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/api/prom/query_range"}, @@ -768,13 +799,14 @@ func Test_codec_seriesVolume_DecodeRequest(t *testing.T) { func Test_codec_EncodeResponse(t *testing.T) { tests := []struct { - name 
string - path string - res queryrangebase.Response - body string - wantErr bool + name string + path string + res queryrangebase.Response + body string + wantErr bool + queryParams map[string]string }{ - {"error", "/loki/api/v1/query_range", &badResponse{}, "", true}, + {"error", "/loki/api/v1/query_range", &badResponse{}, "", true, nil}, { "prom", "/loki/api/v1/query_range", &LokiPromResponse{ @@ -786,7 +818,7 @@ func Test_codec_EncodeResponse(t *testing.T) { }, }, Statistics: statsResult, - }, matrixString, false}, + }, matrixString, false, nil}, { "loki v1", "/loki/api/v1/query_range", &LokiResponse{ @@ -799,7 +831,25 @@ func Test_codec_EncodeResponse(t *testing.T) { Result: logStreams, }, Statistics: statsResult, - }, streamsString, false, + }, streamsString, false, nil, + }, + { + "loki v1 with categories", "/loki/api/v1/query_range", + &LokiResponse{ + Status: loghttp.QueryStatusSuccess, + Direction: logproto.FORWARD, + Limit: 100, + Version: uint32(loghttp.VersionV1), + Data: LokiData{ + ResultType: loghttp.ResultTypeStream, + Result: logStreamsWithCategories, + }, + Statistics: statsResult, + }, + streamsStringWithCategories, false, + map[string]string{ + httpreq.LokiEncodingFlagsHeader: string(httpreq.FlagCategorizeLabels), + }, }, { "loki legacy", "/api/promt/query", @@ -813,7 +863,7 @@ func Test_codec_EncodeResponse(t *testing.T) { Result: logStreams, }, Statistics: statsResult, - }, streamsStringLegacy, false, + }, streamsStringLegacy, false, nil, }, { "loki series", "/loki/api/v1/series", @@ -821,7 +871,7 @@ func Test_codec_EncodeResponse(t *testing.T) { Status: "success", Version: uint32(loghttp.VersionV1), Data: seriesData, - }, seriesString, false, + }, seriesString, false, nil, }, { "loki labels", "/loki/api/v1/labels", @@ -829,7 +879,7 @@ func Test_codec_EncodeResponse(t *testing.T) { Status: "success", Version: uint32(loghttp.VersionV1), Data: labelsData, - }, labelsString, false, + }, labelsString, false, nil, }, { "loki labels legacy", "/api/prom/label", @@ -837,7 +887,7 @@ func Test_codec_EncodeResponse(t *testing.T) { Status: "success", Version: uint32(loghttp.VersionLegacy), Data: labelsData, - }, labelsLegacyString, false, + }, labelsLegacyString, false, nil, }, { "index stats", "/loki/api/v1/index/stats", @@ -848,7 +898,7 @@ func Test_codec_EncodeResponse(t *testing.T) { Bytes: 3, Entries: 4, }, - }, indexStatsString, false, + }, indexStatsString, false, nil, }, { "volume", "/loki/api/v1/index/volume", @@ -859,16 +909,21 @@ func Test_codec_EncodeResponse(t *testing.T) { }, Limit: 100, }, - }, seriesVolumeString, false, + }, seriesVolumeString, false, nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { u := &url.URL{Path: tt.path} + h := http.Header{} + for k, v := range tt.queryParams { + h.Set(k, v) + } req := &http.Request{ Method: "GET", RequestURI: u.String(), URL: u, + Header: h, } got, err := DefaultCodec.EncodeResponse(context.TODO(), req, tt.res) if (err != nil) != tt.wantErr { @@ -1559,17 +1614,132 @@ var ( }, { "stream": { - "test": "test2" + "test": "test", + "x": "a", + "y": "b" }, "values":[ - [ "123456789012346", "super line2"] + [ "123456789012346", "super line2" ] + ] + }, + { + "stream": { + "test": "test", + "x": "a", + "y": "b", + "z": "text" + }, + "values":[ + [ "123456789012346", "super line3 z=text" ] + ] + } + ] + } + }` + streamsStringWithStructuredMetdata = `{ + "status": "success", + "data": { + ` + statsResultString + ` + "resultType": "streams", + "result": [ + { + "stream": { + "test": "test" + }, + "values":[ + [ 
"123456789012345", "super line"] + ] + }, + { + "stream": { + "test": "test", + "x": "a", + "y": "b" + }, + "values":[ + [ "123456789012346", "super line2", {"x": "a", "y": "b"} ] + ] + }, + { + "stream": { + "test": "test", + "x": "a", + "y": "b", + "z": "text" + }, + "values":[ + [ "123456789012346", "super line3 z=text", {"x": "a", "y": "b"}] + ] + } + ] + } + }` + streamsStringWithCategories = `{ + "status": "success", + "data": { + ` + statsResultString + ` + "resultType": "streams", + "encodingFlags": ["` + string(httpreq.FlagCategorizeLabels) + `"], + "result": [ + { + "stream": { + "test": "test" + }, + "values":[ + [ "123456789012345", "super line"], + [ "123456789012346", "super line2", { + "structuredMetadata": { + "x": "a", + "y": "b" + } + }], + [ "123456789012347", "super line3 z=text", { + "structuredMetadata": { + "x": "a", + "y": "b" + }, + "parsed": { + "z": "text" + } + }] ] } ] } }` streamsStringLegacy = `{ - ` + statsResultString + `"streams":[{"labels":"{test=\"test\"}","entries":[{"ts":"1970-01-02T10:17:36.789012345Z","line":"super line"}]},{"labels":"{test=\"test2\"}","entries":[{"ts":"1970-01-02T10:17:36.789012346Z","line":"super line2"}]}]}` + ` + statsResultString + `"streams":[{"labels":"{test=\"test\"}","entries":[{"ts":"1970-01-02T10:17:36.789012345Z","line":"super line"}]},{"labels":"{test=\"test\", x=\"a\", y=\"b\"}","entries":[{"ts":"1970-01-02T10:17:36.789012346Z","line":"super line2"}]}, {"labels":"{test=\"test\", x=\"a\", y=\"b\", z=\"text\"}","entries":[{"ts":"1970-01-02T10:17:36.789012346Z","line":"super line3 z=text"}]}]}` + logStreamsWithStructuredMetadata = []logproto.Stream{ + { + Labels: `{test="test"}`, + Entries: []logproto.Entry{ + { + Line: "super line", + Timestamp: time.Unix(0, 123456789012345).UTC(), + }, + }, + }, + { + Labels: `{test="test", x="a", y="b"}`, + Entries: []logproto.Entry{ + { + Line: "super line2", + Timestamp: time.Unix(0, 123456789012346).UTC(), + StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("x", "a", "y", "b")), + }, + }, + }, + { + Labels: `{test="test", x="a", y="b", z="text"}`, + Entries: []logproto.Entry{ + { + Line: "super line3 z=text", + Timestamp: time.Unix(0, 123456789012346).UTC(), + StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("x", "a", "y", "b")), + }, + }, + }, + } logStreams = []logproto.Stream{ { Labels: `{test="test"}`, @@ -1581,7 +1751,7 @@ var ( }, }, { - Labels: `{test="test2"}`, + Labels: `{test="test", x="a", y="b"}`, Entries: []logproto.Entry{ { Line: "super line2", @@ -1589,6 +1759,37 @@ var ( }, }, }, + { + Labels: `{test="test", x="a", y="b", z="text"}`, + Entries: []logproto.Entry{ + { + Line: "super line3 z=text", + Timestamp: time.Unix(0, 123456789012346).UTC(), + }, + }, + }, + } + logStreamsWithCategories = []logproto.Stream{ + { + Labels: `{test="test"}`, + Entries: []logproto.Entry{ + { + Line: "super line", + Timestamp: time.Unix(0, 123456789012345).UTC(), + }, + { + Line: "super line2", + Timestamp: time.Unix(0, 123456789012346).UTC(), + StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("x", "a", "y", "b")), + }, + { + Line: "super line3 z=text", + Timestamp: time.Unix(0, 123456789012347).UTC(), + StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("x", "a", "y", "b")), + Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings("z", "text")), + }, + }, + }, } seriesString = `{ "status": "success", diff --git a/pkg/querier/queryrange/serialize.go 
b/pkg/querier/queryrange/serialize.go index df9e49af37e32..b85f707a692b7 100644 --- a/pkg/querier/queryrange/serialize.go +++ b/pkg/querier/queryrange/serialize.go @@ -7,6 +7,7 @@ import ( "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" + "github.com/grafana/loki/pkg/util/httpreq" serverutil "github.com/grafana/loki/pkg/util/server" ) @@ -70,7 +71,8 @@ func (rt *serializeHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request } version := loghttp.GetVersion(r.RequestURI) - if err := encodeResponseJSONTo(version, response, w); err != nil { + encodingFlags := httpreq.ExtractEncodingFlags(r) + if err := encodeResponseJSONTo(version, response, w, encodingFlags); err != nil { serverutil.WriteError(err, w) } } diff --git a/pkg/storage/lazy_chunk_test.go b/pkg/storage/lazy_chunk_test.go index cd7059954a212..2244c02c924c6 100644 --- a/pkg/storage/lazy_chunk_test.go +++ b/pkg/storage/lazy_chunk_test.go @@ -204,7 +204,7 @@ func (fakeBlock) Entries() int { return 0 } func (fakeBlock) Offset() int { return 0 } func (f fakeBlock) MinTime() int64 { return f.mint } func (f fakeBlock) MaxTime() int64 { return f.maxt } -func (fakeBlock) Iterator(context.Context, log.StreamPipeline, ...iter.EntryIteratorOption) iter.EntryIterator { +func (fakeBlock) Iterator(context.Context, log.StreamPipeline) iter.EntryIterator { return nil } diff --git a/pkg/util/httpreq/encoding_flags.go b/pkg/util/httpreq/encoding_flags.go new file mode 100644 index 0000000000000..89656618eb60d --- /dev/null +++ b/pkg/util/httpreq/encoding_flags.go @@ -0,0 +1,113 @@ +package httpreq + +import ( + "context" + "net/http" + "strings" + + "github.com/grafana/dskit/httpgrpc" +) + +type EncodingFlag string + +type EncodingFlags map[EncodingFlag]struct{} + +func NewEncodingFlags(flags ...EncodingFlag) EncodingFlags { + var ef EncodingFlags + ef.Set(flags...) 
+ return ef +} + +func (ef *EncodingFlags) Set(flags ...EncodingFlag) { + if *ef == nil { + *ef = make(EncodingFlags, len(flags)) + } + + for _, flag := range flags { + (*ef)[flag] = struct{}{} + } +} + +func (ef *EncodingFlags) Has(flag EncodingFlag) bool { + _, ok := (*ef)[flag] + return ok +} + +func (ef *EncodingFlags) String() string { + var sb strings.Builder + var i int + for flag := range *ef { + if i > 0 { + sb.WriteString(EncodeFlagsDelimiter) + } + sb.WriteString(string(flag)) + i++ + } + return sb.String() +} + +const ( + LokiEncodingFlagsHeader = "X-Loki-Response-Encoding-Flags" + FlagCategorizeLabels EncodingFlag = "categorize-labels" + + EncodeFlagsDelimiter = "," +) + +func AddEncodingFlags(req *http.Request, flags EncodingFlags) { + if len(flags) == 0 { + return + } + + req.Header.Set(LokiEncodingFlagsHeader, flags.String()) +} + +func AddEncodingFlagsToContext(ctx context.Context, flags EncodingFlags) context.Context { + if len(flags) == 0 { + return ctx + } + + return context.WithValue(ctx, headerContextKey(LokiEncodingFlagsHeader), flags.String()) +} + +func ExtractEncodingFlags(req *http.Request) EncodingFlags { + rawValue := req.Header.Get(LokiEncodingFlagsHeader) + if rawValue == "" { + return nil + } + + return parseEncodingFlags(rawValue) +} + +func ExtractEncodingFlagsFromProto(req *httpgrpc.HTTPRequest) EncodingFlags { + var rawValue string + for _, header := range req.GetHeaders() { + if header.GetKey() == LokiEncodingFlagsHeader { + rawValue = header.GetValues()[0] + if rawValue == "" { + return nil + } + + return parseEncodingFlags(rawValue) + } + } + + return nil +} + +func ExtractEncodingFlagsFromCtx(ctx context.Context) EncodingFlags { + rawValue := ExtractHeader(ctx, LokiEncodingFlagsHeader) + if rawValue == "" { + return nil + } + + return parseEncodingFlags(rawValue) +} + +func parseEncodingFlags(rawFlags string) EncodingFlags { + split := strings.Split(rawFlags, EncodeFlagsDelimiter) + flags := make(EncodingFlags, len(split)) + for _, rawFlag := range split { + flags.Set(EncodingFlag(rawFlag)) + } + return flags +} diff --git a/pkg/util/marshal/labels.go b/pkg/util/marshal/labels.go index 70615461620f5..8998f133b921a 100644 --- a/pkg/util/marshal/labels.go +++ b/pkg/util/marshal/labels.go @@ -12,8 +12,8 @@ func NewLabelSet(s string) (loghttp.LabelSet, error) { if err != nil { return nil, err } - ret := make(map[string]string, len(labels)) + ret := make(map[string]string, len(labels)) for _, l := range labels { ret[l.Name] = l.Value } diff --git a/pkg/util/marshal/legacy/marshal_test.go b/pkg/util/marshal/legacy/marshal_test.go index 34e1675d1efd6..83ed90ce35da1 100644 --- a/pkg/util/marshal/legacy/marshal_test.go +++ b/pkg/util/marshal/legacy/marshal_test.go @@ -50,11 +50,7 @@ var queryTests = []struct { }, { "ts": "2019-09-13T18:32:23.380001319Z", - "line": "super line with labels", - "structuredMetadata": { - "foo": "a", - "bar": "b" - } + "line": "super line with labels" } ] } @@ -229,11 +225,7 @@ var tailTests = []struct { }, { "ts": "2019-09-13T18:32:23.380001319Z", - "line": "super line with labels", - "structuredMetadata": { - "foo": "a", - "bar": "b" - } + "line": "super line with labels" } ] } diff --git a/pkg/util/marshal/marshal.go b/pkg/util/marshal/marshal.go index be03bed347a08..562808b300232 100644 --- a/pkg/util/marshal/marshal.go +++ b/pkg/util/marshal/marshal.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" indexStats 
"github.com/grafana/loki/pkg/storage/stores/index/stats" + "github.com/grafana/loki/pkg/util/httpreq" marshal_legacy "github.com/grafana/loki/pkg/util/marshal/legacy" ) @@ -24,8 +25,9 @@ func WriteResponseJSON(r *http.Request, v any, w http.ResponseWriter) error { switch result := v.(type) { case logqlmodel.Result: version := loghttp.GetVersion(r.RequestURI) + encodeFlags := httpreq.ExtractEncodingFlags(r) if version == loghttp.VersionV1 { - return WriteQueryResponseJSON(result.Data, result.Statistics, w) + return WriteQueryResponseJSON(result.Data, result.Statistics, w, encodeFlags) } return marshal_legacy.WriteQueryResponseJSON(result, w) @@ -48,10 +50,10 @@ func WriteResponseJSON(r *http.Request, v any, w http.ResponseWriter) error { // WriteQueryResponseJSON marshals the promql.Value to v1 loghttp JSON and then // writes it to the provided io.Writer. -func WriteQueryResponseJSON(data parser.Value, statistics stats.Result, w io.Writer) error { +func WriteQueryResponseJSON(data parser.Value, statistics stats.Result, w io.Writer, encodeFlags httpreq.EncodingFlags) error { s := jsoniter.ConfigFastest.BorrowStream(w) defer jsoniter.ConfigFastest.ReturnStream(s) - err := EncodeResult(data, statistics, s) + err := EncodeResult(data, statistics, s, encodeFlags) if err != nil { return fmt.Errorf("could not write JSON response: %w", err) } diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go index dc80438d1b983..070f7b0ed4012 100644 --- a/pkg/util/marshal/marshal_test.go +++ b/pkg/util/marshal/marshal_test.go @@ -20,8 +20,188 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/util/httpreq" ) +const emptyStats = `{ + "ingester" : { + "store": { + "chunksDownloadTime": 0, + "totalChunksRef": 0, + "totalChunksDownloaded": 0, + "chunkRefsFetchTime": 0, + "chunk" :{ + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "decompressedStructuredMetadataBytes": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "headChunkStructuredMetadataBytes": 0, + "postFilterLines": 0, + "totalDuplicates": 0 + } + }, + "totalBatches": 0, + "totalChunksMatched": 0, + "totalLinesSent": 0, + "totalReached": 0 + }, + "querier": { + "store": { + "chunksDownloadTime": 0, + "totalChunksRef": 0, + "totalChunksDownloaded": 0, + "chunkRefsFetchTime": 0, + "chunk" :{ + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "decompressedStructuredMetadataBytes": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "headChunkStructuredMetadataBytes": 0, + "postFilterLines": 0, + "totalDuplicates": 0 + } + } + }, + "cache": { + "chunk": { + "entriesFound": 0, + "entriesRequested": 0, + "entriesStored": 0, + "bytesReceived": 0, + "bytesSent": 0, + "requests": 0, + "downloadTime": 0 + }, + "index": { + "entriesFound": 0, + "entriesRequested": 0, + "entriesStored": 0, + "bytesReceived": 0, + "bytesSent": 0, + "requests": 0, + "downloadTime": 0 + }, + "statsResult": { + "entriesFound": 0, + "entriesRequested": 0, + "entriesStored": 0, + "bytesReceived": 0, + "bytesSent": 0, + "requests": 0, + "downloadTime": 0 + }, + "volumeResult": { + "entriesFound": 0, + "entriesRequested": 0, + "entriesStored": 0, + "bytesReceived": 0, + "bytesSent": 0, + "requests": 0, + "downloadTime": 0 + }, + "result": { + "entriesFound": 0, + "entriesRequested": 0, + "entriesStored": 0, + "bytesReceived": 0, + 
"bytesSent": 0, + "requests": 0, + "downloadTime": 0 + } + }, + "summary": { + "bytesProcessedPerSecond": 0, + "execTime": 0, + "linesProcessedPerSecond": 0, + "queueTime": 0, + "shards": 0, + "splits": 0, + "subqueries": 0, + "totalBytesProcessed": 0, + "totalEntriesReturned": 0, + "totalLinesProcessed": 0, + "totalStructuredMetadataBytesProcessed": 0, + "totalPostFilterLines": 0 + } +}` + +var queryTestWithEncodingFlags = []struct { + actual parser.Value + encodingFlags httpreq.EncodingFlags + expected string +}{ + { + actual: logqlmodel.Streams{ + logproto.Stream{ + Entries: []logproto.Entry{ + { + Timestamp: time.Unix(0, 123456789012345), + Line: "super line", + }, + { + Timestamp: time.Unix(0, 123456789012346), + Line: "super line with labels", + StructuredMetadata: []logproto.LabelAdapter{ + {Name: "foo", Value: "a"}, + {Name: "bar", Value: "b"}, + }, + }, + { + Timestamp: time.Unix(0, 123456789012347), + Line: "super line with labels msg=text", + StructuredMetadata: []logproto.LabelAdapter{ + {Name: "foo", Value: "a"}, + {Name: "bar", Value: "b"}, + }, + Parsed: []logproto.LabelAdapter{ + {Name: "msg", Value: "text"}, + }, + }, + }, + Labels: `{test="test"}`, + }, + }, + encodingFlags: httpreq.NewEncodingFlags(httpreq.FlagCategorizeLabels), + expected: fmt.Sprintf(`{ + "status": "success", + "data": { + "resultType": "streams", + "encodingFlags": ["%s"], + "result": [ + { + "stream": { + "test": "test" + }, + "values":[ + [ "123456789012345", "super line"], + [ "123456789012346", "super line with labels", { + "structuredMetadata": { + "foo": "a", + "bar": "b" + } + }], + [ "123456789012347", "super line with labels msg=text", { + "structuredMetadata": { + "foo": "a", + "bar": "b" + }, + "parsed": { + "msg": "text" + } + }] + ] + } + ], + "stats" : %s + } + }`, httpreq.FlagCategorizeLabels, emptyStats), + }, +} + // covers responses from /loki/api/v1/query_range and /loki/api/v1/query var queryTests = []struct { actual parser.Value @@ -47,7 +227,7 @@ var queryTests = []struct { Labels: `{test="test"}`, }, }, - `{ + fmt.Sprintf(`{ "status": "success", "data": { "resultType": "streams", @@ -58,117 +238,13 @@ var queryTests = []struct { }, "values":[ [ "123456789012345", "super line"], - [ "123456789012346", "super line with labels", { "foo": "a", "bar": "b" } ] + [ "123456789012346", "super line with labels" ] ] } ], - "stats" : { - "ingester" : { - "store": { - "chunksDownloadTime": 0, - "totalChunksRef": 0, - "totalChunksDownloaded": 0, - "chunkRefsFetchTime": 0, - "chunk" :{ - "compressedBytes": 0, - "decompressedBytes": 0, - "decompressedLines": 0, - "decompressedStructuredMetadataBytes": 0, - "headChunkBytes": 0, - "headChunkLines": 0, - "headChunkStructuredMetadataBytes": 0, - "postFilterLines": 0, - "totalDuplicates": 0 - } - }, - "totalBatches": 0, - "totalChunksMatched": 0, - "totalLinesSent": 0, - "totalReached": 0 - }, - "querier": { - "store": { - "chunksDownloadTime": 0, - "totalChunksRef": 0, - "totalChunksDownloaded": 0, - "chunkRefsFetchTime": 0, - "chunk" :{ - "compressedBytes": 0, - "decompressedBytes": 0, - "decompressedLines": 0, - "decompressedStructuredMetadataBytes": 0, - "headChunkBytes": 0, - "headChunkLines": 0, - "headChunkStructuredMetadataBytes": 0, - "postFilterLines": 0, - "totalDuplicates": 0 - } - } - }, - "cache": { - "chunk": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "index": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 
0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "statsResult": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "volumeResult": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "result": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - } - }, - "summary": { - "bytesProcessedPerSecond": 0, - "execTime": 0, - "linesProcessedPerSecond": 0, - "queueTime": 0, - "shards": 0, - "splits": 0, - "subqueries": 0, - "totalBytesProcessed": 0, - "totalEntriesReturned": 0, - "totalLinesProcessed": 0, - "totalStructuredMetadataBytesProcessed": 0, - "totalPostFilterLines": 0 - } - } + "stats" : %s } - }`, + }`, emptyStats), }, // vector test { @@ -202,7 +278,7 @@ var queryTests = []struct { }, }, }, - `{ + fmt.Sprintf(`{ "data": { "resultType": "vector", "result": [ @@ -227,114 +303,10 @@ var queryTests = []struct { ] } ], - "stats" : { - "ingester" : { - "store": { - "chunksDownloadTime": 0, - "totalChunksRef": 0, - "totalChunksDownloaded": 0, - "chunkRefsFetchTime": 0, - "chunk" :{ - "compressedBytes": 0, - "decompressedBytes": 0, - "decompressedLines": 0, - "decompressedStructuredMetadataBytes": 0, - "headChunkBytes": 0, - "headChunkLines": 0, - "headChunkStructuredMetadataBytes": 0, - "postFilterLines": 0, - "totalDuplicates": 0 - } - }, - "totalBatches": 0, - "totalChunksMatched": 0, - "totalLinesSent": 0, - "totalReached": 0 - }, - "querier": { - "store": { - "chunksDownloadTime": 0, - "totalChunksRef": 0, - "totalChunksDownloaded": 0, - "chunkRefsFetchTime": 0, - "chunk" :{ - "compressedBytes": 0, - "decompressedBytes": 0, - "decompressedLines": 0, - "decompressedStructuredMetadataBytes": 0, - "headChunkBytes": 0, - "headChunkLines": 0, - "headChunkStructuredMetadataBytes": 0, - "postFilterLines": 0, - "totalDuplicates": 0 - } - } - }, - "cache": { - "chunk": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "index": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "statsResult": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "volumeResult": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "result": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - } - }, - "summary": { - "bytesProcessedPerSecond": 0, - "execTime": 0, - "linesProcessedPerSecond": 0, - "queueTime": 0, - "shards": 0, - "splits": 0, - "subqueries": 0, - "totalBytesProcessed": 0, - "totalEntriesReturned": 0, - "totalLinesProcessed": 0, - "totalStructuredMetadataBytesProcessed": 0, - "totalPostFilterLines": 0 - } - } - }, + "stats" : %s + }, "status": "success" - }`, + }`, emptyStats), }, // matrix test { @@ -380,7 +352,7 @@ var queryTests = []struct { }, }, }, - `{ + fmt.Sprintf(`{ "data": { "resultType": "matrix", "result": [ @@ -413,114 +385,10 @@ var queryTests = []struct { ] } ], 
- "stats" : { - "ingester" : { - "store": { - "chunksDownloadTime": 0, - "totalChunksRef": 0, - "totalChunksDownloaded": 0, - "chunkRefsFetchTime": 0, - "chunk" :{ - "compressedBytes": 0, - "decompressedBytes": 0, - "decompressedLines": 0, - "decompressedStructuredMetadataBytes": 0, - "headChunkBytes": 0, - "headChunkLines": 0, - "headChunkStructuredMetadataBytes": 0, - "postFilterLines": 0, - "totalDuplicates": 0 - } - }, - "totalBatches": 0, - "totalChunksMatched": 0, - "totalLinesSent": 0, - "totalReached": 0 - }, - "querier": { - "store": { - "chunksDownloadTime": 0, - "totalChunksRef": 0, - "totalChunksDownloaded": 0, - "chunkRefsFetchTime": 0, - "chunk" :{ - "compressedBytes": 0, - "decompressedBytes": 0, - "decompressedLines": 0, - "decompressedStructuredMetadataBytes": 0, - "headChunkBytes": 0, - "headChunkLines": 0, - "headChunkStructuredMetadataBytes": 0, - "postFilterLines": 0, - "totalDuplicates": 0 - } - } - }, - "cache": { - "chunk": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "index": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "statsResult": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "volumeResult": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - }, - "result": { - "entriesFound": 0, - "entriesRequested": 0, - "entriesStored": 0, - "bytesReceived": 0, - "bytesSent": 0, - "requests": 0, - "downloadTime": 0 - } - }, - "summary": { - "bytesProcessedPerSecond": 0, - "execTime": 0, - "linesProcessedPerSecond": 0, - "queueTime": 0, - "shards": 0, - "splits": 0, - "subqueries": 0, - "totalBytesProcessed": 0, - "totalEntriesReturned": 0, - "totalLinesProcessed": 0, - "totalStructuredMetadataBytesProcessed": 0, - "totalPostFilterLines": 0 - } - } + "stats" : %s }, "status": "success" - }`, + }`, emptyStats), }, } @@ -542,6 +410,7 @@ var labelTests = []struct { } // covers responses from /loki/api/v1/tail +// TODO(salvacorts): Support encoding flags. And fix serialized structured metadata labels which shouldn't be there unless the categorize flag is set. 
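To make the categorize flag concrete before the tail cases below, here is a small illustrative sketch of how a caller opts in on the query path, using only the httpreq helpers added in this PR (the host and query string are invented):

	// query={job="foo"}, URL-escaped; this sets the header
	// X-Loki-Response-Encoding-Flags: categorize-labels
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:3100/loki/api/v1/query_range?query=%7Bjob%3D%22foo%22%7D", nil)
	if err != nil {
		// handle err
	}
	httpreq.AddEncodingFlags(req, httpreq.NewEncodingFlags(httpreq.FlagCategorizeLabels))

With the flag set, a v1 streams response carries "encodingFlags":["categorize-labels"] plus per-entry {"structuredMetadata":{...},"parsed":{...}} objects, as in the fixtures above; without it, each value stays a flat [timestamp, line] pair.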
var tailTests = []struct { actual legacy.TailResponse expected string @@ -601,7 +470,14 @@ var tailTests = []struct { func Test_WriteQueryResponseJSON(t *testing.T) { for i, queryTest := range queryTests { var b bytes.Buffer - err := WriteQueryResponseJSON(queryTest.actual, stats.Result{}, &b) + err := WriteQueryResponseJSON(queryTest.actual, stats.Result{}, &b, nil) + require.NoError(t, err) + + require.JSONEqf(t, queryTest.expected, b.String(), "Query Test %d failed", i) + } + for i, queryTest := range queryTestWithEncodingFlags { + var b bytes.Buffer + err := WriteQueryResponseJSON(queryTest.actual, stats.Result{}, &b, queryTest.encodingFlags) require.NoError(t, err) require.JSONEqf(t, queryTest.expected, b.String(), "Query Test %d failed", i) @@ -633,7 +509,7 @@ func Test_WriteQueryResponseJSONWithError(t *testing.T) { }, } var b bytes.Buffer - err := WriteQueryResponseJSON(broken.Data, stats.Result{}, &b) + err := WriteQueryResponseJSON(broken.Data, stats.Result{}, &b, nil) require.Error(t, err) } @@ -756,6 +632,152 @@ func Test_WriteSeriesResponseJSON(t *testing.T) { } } +func Test_WriteQueryResponseJSON_EncodeFlags(t *testing.T) { + inputStream := logqlmodel.Streams{ + logproto.Stream{ + Labels: `{test="test"}`, + Entries: []logproto.Entry{ + { + Timestamp: time.Unix(0, 123456789012346), + Line: "super line", + }, + }, + }, + logproto.Stream{ + Labels: `{test="test", foo="a", bar="b"}`, + Entries: []logproto.Entry{ + { + Timestamp: time.Unix(0, 123456789012346), + Line: "super line with labels", + StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", "a", "bar", "b")), + }, + }, + }, + logproto.Stream{ + Labels: `{test="test", foo="a", bar="b", msg="baz"}`, + Entries: []logproto.Entry{ + { + Timestamp: time.Unix(0, 123456789012346), + Line: "super line with labels msg=baz", + StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", "a", "bar", "b")), + Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings("msg", "baz")), + }, + }, + }, + } + + for _, tc := range []struct { + name string + encodeFlags httpreq.EncodingFlags + expected string + }{ + { + name: "uncategorized labels", + expected: fmt.Sprintf(`{ + "status": "success", + "data": { + "resultType": "streams", + "result": [ + { + "stream": { + "test": "test" + }, + "values":[ + [ "123456789012346", "super line"] + ] + }, + { + "stream": { + "test": "test", + "foo": "a", + "bar": "b" + }, + "values":[ + [ "123456789012346", "super line with labels"] + ] + }, + { + "stream": { + "test": "test", + "foo": "a", + "bar": "b", + "msg": "baz" + }, + "values":[ + [ "123456789012346", "super line with labels msg=baz"] + ] + } + ], + "stats" : %s + } + }`, emptyStats), + }, + { + name: "categorized labels", + encodeFlags: httpreq.NewEncodingFlags(httpreq.FlagCategorizeLabels), + expected: fmt.Sprintf(`{ + "status": "success", + "data": { + "resultType": "streams", + "encodingFlags": ["%s"], + "result": [ + { + "stream": { + "test": "test" + }, + "values":[ + [ "123456789012346", "super line"] + ] + }, + { + "stream": { + "test": "test", + "foo": "a", + "bar": "b" + }, + "values":[ + [ "123456789012346", "super line with labels", { + "structuredMetadata": { + "foo": "a", + "bar": "b" + } + }] + ] + }, + { + "stream": { + "test": "test", + "foo": "a", + "bar": "b", + "msg": "baz" + }, + "values":[ + [ "123456789012346", "super line with labels msg=baz", { + "structuredMetadata": { + "foo": "a", + "bar": "b" + }, + "parsed": { + "msg": "baz" + } + }] + ] + } + ], + "stats" : %s + } 
+ }`, httpreq.FlagCategorizeLabels, emptyStats), + }, + } { + t.Run(tc.name, func(t *testing.T) { + var b bytes.Buffer + err := WriteQueryResponseJSON(inputStream, stats.Result{}, &b, tc.encodeFlags) + require.NoError(t, err) + require.JSONEq(t, tc.expected, b.String()) + }) + } +} + // wrappedValue and its Generate method is used by quick to generate a random // parser.Value. type wrappedValue struct { @@ -857,7 +879,7 @@ func Test_EncodeResult_And_ResultValue_Parity(t *testing.T) { f := func(w wrappedValue) bool { var buf bytes.Buffer js := json.NewStream(json.ConfigFastest, &buf, 0) - err := encodeResult(w.Value, js) + err := encodeResult(w.Value, js, httpreq.NewEncodingFlags(httpreq.FlagCategorizeLabels)) require.NoError(t, err) js.Flush() actual := buf.String() @@ -883,7 +905,7 @@ func Benchmark_Encode(b *testing.B) { for n := 0; n < b.N; n++ { for _, queryTest := range queryTests { - require.NoError(b, WriteQueryResponseJSON(queryTest.actual, stats.Result{}, buf)) + require.NoError(b, WriteQueryResponseJSON(queryTest.actual, stats.Result{}, buf, nil)) buf.Reset() } } diff --git a/pkg/util/marshal/query.go b/pkg/util/marshal/query.go index 56cc5672469a3..fb6aead8a76ee 100644 --- a/pkg/util/marshal/query.go +++ b/pkg/util/marshal/query.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/util/httpreq" ) // NewResultValue constructs a ResultValue from a promql.Value @@ -174,14 +175,14 @@ func NewMetric(l labels.Labels) model.Metric { return ret } -func EncodeResult(data parser.Value, statistics stats.Result, s *jsoniter.Stream) error { +func EncodeResult(data parser.Value, statistics stats.Result, s *jsoniter.Stream, encodeFlags httpreq.EncodingFlags) error { s.WriteObjectStart() s.WriteObjectField("status") s.WriteString("success") s.WriteMore() s.WriteObjectField("data") - err := encodeData(data, statistics, s) + err := encodeData(data, statistics, s, encodeFlags) if err != nil { return err } @@ -190,15 +191,39 @@ func EncodeResult(data parser.Value, statistics stats.Result, s *jsoniter.Stream return nil } -func encodeData(data parser.Value, statistics stats.Result, s *jsoniter.Stream) error { +func encodeEncodingFlags(s *jsoniter.Stream, flags httpreq.EncodingFlags) error { + s.WriteArrayStart() + defer s.WriteArrayEnd() + + var i int + for flag := range flags { + if i > 0 { + s.WriteMore() + } + s.WriteString(string(flag)) + i++ + } + + return nil +} + +func encodeData(data parser.Value, statistics stats.Result, s *jsoniter.Stream, encodeFlags httpreq.EncodingFlags) error { s.WriteObjectStart() s.WriteObjectField("resultType") s.WriteString(string(data.Type())) + if len(encodeFlags) > 0 { + s.WriteMore() + s.WriteObjectField("encodingFlags") + if err := encodeEncodingFlags(s, encodeFlags); err != nil { + return err + } + } + s.WriteMore() s.WriteObjectField("result") - err := encodeResult(data, s) + err := encodeResult(data, s, encodeFlags) if err != nil { return err } @@ -212,7 +237,7 @@ func encodeData(data parser.Value, statistics stats.Result, s *jsoniter.Stream) return nil } -func encodeResult(v parser.Value, s *jsoniter.Stream) error { +func encodeResult(v parser.Value, s *jsoniter.Stream, encodeFlags httpreq.EncodingFlags) error { switch v.Type() { case loghttp.ResultTypeStream: result, ok := v.(logqlmodel.Streams) @@ -221,7 +246,7 @@ func encodeResult(v parser.Value, s *jsoniter.Stream) error { return 
fmt.Errorf("unexpected type %T for streams", s)
 		}
 
-		return encodeStreams(result, s)
+		return encodeStreams(result, s, encodeFlags)
 
 	case loghttp.ResultTypeScalar:
 		scalar, ok := v.(promql.Scalar)
@@ -256,7 +281,7 @@ func encodeResult(v parser.Value, s *jsoniter.Stream) error {
 	return nil
 }
 
-func encodeStreams(streams logqlmodel.Streams, s *jsoniter.Stream) error {
+func encodeStreams(streams logqlmodel.Streams, s *jsoniter.Stream, encodeFlags httpreq.EncodingFlags) error {
 	s.WriteArrayStart()
 	defer s.WriteArrayEnd()
 
@@ -265,7 +290,7 @@ func encodeStreams(streams logqlmodel.Streams, s *jsoniter.Stream) error {
 			s.WriteMore()
 		}
 
-		err := encodeStream(stream, s)
+		err := encodeStream(stream, s, encodeFlags)
 		if err != nil {
 			return err
 		}
@@ -274,25 +299,35 @@ func encodeStreams(streams logqlmodel.Streams, s *jsoniter.Stream) error {
 	return nil
 }
 
-func encodeStream(stream logproto.Stream, s *jsoniter.Stream) error {
+func encodeLabels(labels []logproto.LabelAdapter, s *jsoniter.Stream) {
+	for i, label := range labels {
+		if i > 0 {
+			s.WriteMore()
+		}
+
+		s.WriteObjectField(label.Name)
+		s.WriteString(label.Value)
+	}
+}
+
+// encodeStream encodes a logproto.Stream to JSON.
+// If FlagCategorizeLabels is set, each entry's structured metadata and parsed
+// labels are written next to the log line, grouped under "structuredMetadata"
+// and "parsed". Otherwise, entries are written as plain [timestamp, line]
+// pairs and only the stream labels are emitted.
+func encodeStream(stream logproto.Stream, s *jsoniter.Stream, encodeFlags httpreq.EncodingFlags) error {
+	categorizeLabels := encodeFlags.Has(httpreq.FlagCategorizeLabels)
+
 	s.WriteObjectStart()
 	defer s.WriteObjectEnd()
 
 	s.WriteObjectField("stream")
 	s.WriteObjectStart()
-	labels, err := parser.ParseMetric(stream.Labels)
+
+	lbls, err := parser.ParseMetric(stream.Labels)
 	if err != nil {
 		return err
 	}
+	encodeLabels(logproto.FromLabelsToLabelAdapters(lbls), s)
 
-	for i, l := range labels {
-		if i > 0 {
-			s.WriteMore()
-		}
-
-		s.WriteObjectField(l.Name)
-		s.WriteString(l.Value)
-	}
 	s.WriteObjectEnd()
 	s.Flush()
 
@@ -311,16 +346,30 @@ func encodeStream(stream logproto.Stream, s *jsoniter.Stream) error {
 		s.WriteRaw(`"`)
 		s.WriteMore()
 		s.WriteStringWithHTMLEscaped(e.Line)
-		if len(e.StructuredMetadata) > 0 {
+
+		if categorizeLabels && (len(e.StructuredMetadata) > 0 || len(e.Parsed) > 0) {
 			s.WriteMore()
 			s.WriteObjectStart()
-			for i, lbl := range e.StructuredMetadata {
-				if i > 0 {
+
+			var writeMore bool
+			if len(e.StructuredMetadata) > 0 {
+				s.WriteObjectField("structuredMetadata")
+				s.WriteObjectStart()
+				encodeLabels(e.StructuredMetadata, s)
+				s.WriteObjectEnd()
+				writeMore = true
+			}
+
+			if len(e.Parsed) > 0 {
+				if writeMore {
 					s.WriteMore()
 				}
-				s.WriteObjectField(lbl.Name)
-				s.WriteString(lbl.Value)
+				s.WriteObjectField("parsed")
+				s.WriteObjectStart()
+				encodeLabels(e.Parsed, s)
+				s.WriteObjectEnd()
 			}
+
 			s.WriteObjectEnd()
 		}
 		s.WriteArrayEnd()
diff --git a/vendor/github.com/grafana/loki/pkg/push/push.pb.go b/vendor/github.com/grafana/loki/pkg/push/push.pb.go
index 441c4409f311b..3b07d850ff162 100644
--- a/vendor/github.com/grafana/loki/pkg/push/push.pb.go
+++ b/vendor/github.com/grafana/loki/pkg/push/push.pb.go
@@ -219,6 +219,10 @@ type EntryAdapter struct {
 	Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
 	Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
 	StructuredMetadata []LabelPairAdapter `protobuf:"bytes,3,rep,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"`
+	// This field shouldn't be used by clients to push data to Loki.
+ // It is only used by Loki to return parsed log lines in query responses. + // TODO: Remove this field from the write path Proto. + Parsed []LabelPairAdapter `protobuf:"bytes,4,rep,name=parsed,proto3" json:"parsed,omitempty"` } func (m *EntryAdapter) Reset() { *m = EntryAdapter{} } @@ -274,6 +278,13 @@ func (m *EntryAdapter) GetStructuredMetadata() []LabelPairAdapter { return nil } +func (m *EntryAdapter) GetParsed() []LabelPairAdapter { + if m != nil { + return m.Parsed + } + return nil +} + func init() { proto.RegisterType((*PushRequest)(nil), "logproto.PushRequest") proto.RegisterType((*PushResponse)(nil), "logproto.PushResponse") @@ -285,39 +296,40 @@ func init() { func init() { proto.RegisterFile("pkg/push/push.proto", fileDescriptor_35ec442956852c9e) } var fileDescriptor_35ec442956852c9e = []byte{ - // 503 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xf6, 0x25, 0x69, 0xda, 0x5e, 0x4a, 0x41, 0x47, 0x5b, 0x8c, 0x55, 0x9d, 0x23, 0x8b, 0x21, - 0x03, 0xd8, 0x52, 0x18, 0x58, 0x58, 0x62, 0x09, 0xa9, 0x03, 0x48, 0x95, 0x41, 0x20, 0xb1, 0x5d, - 0x9a, 0xab, 0x6d, 0xd5, 0xf6, 0x99, 0xbb, 0x33, 0x52, 0x37, 0x7e, 0x42, 0xf9, 0x17, 0xfc, 0x94, - 0x8e, 0x19, 0x2b, 0x06, 0x43, 0x9c, 0xa5, 0xca, 0xd4, 0x9f, 0x80, 0x7c, 0xf6, 0x91, 0x52, 0xba, - 0x9c, 0xbf, 0xf7, 0xdd, 0x7b, 0xef, 0xfb, 0xfc, 0x9e, 0x0d, 0x1f, 0xe7, 0x67, 0xa1, 0x97, 0x17, - 0x22, 0x52, 0x87, 0x9b, 0x73, 0x26, 0x19, 0xda, 0x4a, 0x58, 0xa8, 0x90, 0xb5, 0x17, 0xb2, 0x90, - 0x29, 0xe8, 0xd5, 0xa8, 0xb9, 0xb7, 0xec, 0x90, 0xb1, 0x30, 0xa1, 0x9e, 0x8a, 0xa6, 0xc5, 0xa9, - 0x27, 0xe3, 0x94, 0x0a, 0x49, 0xd2, 0xbc, 0x49, 0x70, 0x3e, 0xc1, 0xc1, 0x71, 0x21, 0xa2, 0x80, - 0x7e, 0x29, 0xa8, 0x90, 0xe8, 0x08, 0x6e, 0x0a, 0xc9, 0x29, 0x49, 0x85, 0x09, 0x86, 0xdd, 0xd1, - 0x60, 0xfc, 0xc4, 0xd5, 0x0a, 0xee, 0x7b, 0x75, 0x31, 0x99, 0x91, 0x5c, 0x52, 0xee, 0xef, 0xff, - 0x2c, 0xed, 0x7e, 0x43, 0xad, 0x4a, 0x5b, 0x57, 0x05, 0x1a, 0x38, 0xbb, 0x70, 0xa7, 0x69, 0x2c, - 0x72, 0x96, 0x09, 0xea, 0x7c, 0x07, 0xf0, 0xc1, 0x3f, 0x1d, 0x90, 0x03, 0xfb, 0x09, 0x99, 0xd2, - 0xa4, 0x96, 0x02, 0xa3, 0x6d, 0x1f, 0xae, 0x4a, 0xbb, 0x65, 0x82, 0xf6, 0x89, 0x26, 0x70, 0x93, - 0x66, 0x92, 0xc7, 0x54, 0x98, 0x1d, 0xe5, 0xe7, 0x60, 0xed, 0xe7, 0x4d, 0x26, 0xf9, 0xb9, 0xb6, - 0xf3, 0xf0, 0xb2, 0xb4, 0x8d, 0xda, 0x48, 0x9b, 0x1e, 0x68, 0x80, 0x9e, 0xc2, 0x5e, 0x44, 0x44, - 0x64, 0x76, 0x87, 0x60, 0xd4, 0xf3, 0x37, 0x56, 0xa5, 0x0d, 0x5e, 0x04, 0x8a, 0x72, 0x5e, 0xc3, - 0x47, 0x6f, 0x6b, 0x9d, 0x63, 0x12, 0x73, 0xed, 0x0a, 0xc1, 0x5e, 0x46, 0x52, 0xda, 0x78, 0x0a, - 0x14, 0x46, 0x7b, 0x70, 0xe3, 0x2b, 0x49, 0x0a, 0x6a, 0x76, 0x14, 0xd9, 0x04, 0xce, 0x35, 0x80, - 0x3b, 0xb7, 0x3d, 0xa0, 0x23, 0xb8, 0xfd, 0x77, 0xbc, 0xaa, 0x7e, 0x30, 0xb6, 0xdc, 0x66, 0x01, - 0xae, 0x5e, 0x80, 0xfb, 0x41, 0x67, 0xf8, 0xbb, 0xad, 0xe5, 0x8e, 0x14, 0x17, 0xbf, 0x6c, 0x10, - 0xac, 0x8b, 0xd1, 0x21, 0xec, 0x25, 0x71, 0xd6, 0xea, 0xf9, 0x5b, 0xab, 0xd2, 0x56, 0x71, 0xa0, - 0x4e, 0x94, 0x43, 0x24, 0x24, 0x2f, 0x4e, 0x64, 0xc1, 0xe9, 0xec, 0x1d, 0x95, 0x64, 0x46, 0x24, - 0x31, 0xbb, 0x6a, 0x3e, 0xd6, 0x7a, 0x3e, 0x77, 0x5f, 0xcd, 0x7f, 0xd6, 0x0a, 0x1e, 0xfe, 0x5f, - 0xfd, 0x9c, 0xa5, 0xb1, 0xa4, 0x69, 0x2e, 0xcf, 0x83, 0x7b, 0x7a, 0x8f, 0x27, 0xb0, 0x5f, 0x2f, - 0x93, 0x72, 0xf4, 0x0a, 0xf6, 0x6a, 0x84, 0xf6, 0xd7, 0x3a, 0xb7, 0xbe, 0x1f, 0xeb, 0xe0, 0x2e, - 0xdd, 0x6e, 0xdf, 0xf0, 0x3f, 0xce, 0x17, 0xd8, 0xb8, 0x5a, 0x60, 0xe3, 0x66, 0x81, 0xc1, 0xb7, - 0x0a, 0x83, 0x1f, 0x15, 0x06, 0x97, 0x15, 
0x06, 0xf3, 0x0a, 0x83, 0xdf, 0x15, 0x06, 0xd7, 0x15, - 0x36, 0x6e, 0x2a, 0x0c, 0x2e, 0x96, 0xd8, 0x98, 0x2f, 0xb1, 0x71, 0xb5, 0xc4, 0xc6, 0xe7, 0x61, - 0x18, 0xcb, 0xa8, 0x98, 0xba, 0x27, 0x2c, 0xf5, 0x42, 0x4e, 0x4e, 0x49, 0x46, 0xbc, 0x84, 0x9d, - 0xc5, 0x9e, 0xfe, 0x19, 0xa6, 0x7d, 0xa5, 0xf6, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, - 0x46, 0x64, 0x71, 0x1f, 0x03, 0x00, 0x00, + // 527 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xf5, 0x26, 0x6e, 0xda, 0x6e, 0x4a, 0xa9, 0x96, 0xb6, 0x18, 0xab, 0x5a, 0x47, 0x16, 0x87, + 0x1c, 0xc0, 0x96, 0xc2, 0x81, 0x0b, 0x97, 0x58, 0x42, 0xea, 0xa1, 0x48, 0x95, 0x41, 0x20, 0x71, + 0xdb, 0x34, 0x5b, 0xdb, 0xaa, 0xed, 0x35, 0xbb, 0x6b, 0xa4, 0xde, 0xf8, 0x84, 0xf2, 0x17, 0x7c, + 0x01, 0xdf, 0xd0, 0x63, 0x8e, 0x15, 0x07, 0x43, 0x9c, 0x0b, 0xca, 0xa9, 0x9f, 0x80, 0xbc, 0xb6, + 0x49, 0x28, 0x48, 0x5c, 0x36, 0x6f, 0x66, 0x67, 0xde, 0x7b, 0x99, 0x1d, 0xc3, 0x07, 0xd9, 0x45, + 0xe0, 0x66, 0xb9, 0x08, 0xd5, 0xe1, 0x64, 0x9c, 0x49, 0x86, 0xb6, 0x62, 0x16, 0x28, 0x64, 0xee, + 0x07, 0x2c, 0x60, 0x0a, 0xba, 0x15, 0xaa, 0xef, 0x4d, 0x2b, 0x60, 0x2c, 0x88, 0xa9, 0xab, 0xa2, + 0x49, 0x7e, 0xee, 0xca, 0x28, 0xa1, 0x42, 0x92, 0x24, 0xab, 0x0b, 0xec, 0x77, 0xb0, 0x7f, 0x9a, + 0x8b, 0xd0, 0xa7, 0x1f, 0x72, 0x2a, 0x24, 0x3a, 0x86, 0x9b, 0x42, 0x72, 0x4a, 0x12, 0x61, 0x80, + 0x41, 0x77, 0xd8, 0x1f, 0x3d, 0x74, 0x5a, 0x05, 0xe7, 0xb5, 0xba, 0x18, 0x4f, 0x49, 0x26, 0x29, + 0xf7, 0x0e, 0xbe, 0x15, 0x56, 0xaf, 0x4e, 0x2d, 0x0b, 0xab, 0xed, 0xf2, 0x5b, 0x60, 0xef, 0xc2, + 0x9d, 0x9a, 0x58, 0x64, 0x2c, 0x15, 0xd4, 0xfe, 0x0c, 0xe0, 0xbd, 0x3f, 0x18, 0x90, 0x0d, 0x7b, + 0x31, 0x99, 0xd0, 0xb8, 0x92, 0x02, 0xc3, 0x6d, 0x0f, 0x2e, 0x0b, 0xab, 0xc9, 0xf8, 0xcd, 0x2f, + 0x1a, 0xc3, 0x4d, 0x9a, 0x4a, 0x1e, 0x51, 0x61, 0x74, 0x94, 0x9f, 0xc3, 0x95, 0x9f, 0x97, 0xa9, + 0xe4, 0x97, 0xad, 0x9d, 0xfb, 0xd7, 0x85, 0xa5, 0x55, 0x46, 0x9a, 0x72, 0xbf, 0x05, 0xe8, 0x11, + 0xd4, 0x43, 0x22, 0x42, 0xa3, 0x3b, 0x00, 0x43, 0xdd, 0xdb, 0x58, 0x16, 0x16, 0x78, 0xea, 0xab, + 0x94, 0xfd, 0x02, 0xee, 0x9d, 0x54, 0x3a, 0xa7, 0x24, 0xe2, 0xad, 0x2b, 0x04, 0xf5, 0x94, 0x24, + 0xb4, 0xf6, 0xe4, 0x2b, 0x8c, 0xf6, 0xe1, 0xc6, 0x47, 0x12, 0xe7, 0xd4, 0xe8, 0xa8, 0x64, 0x1d, + 0xd8, 0x5f, 0x3b, 0x70, 0x67, 0xdd, 0x03, 0x3a, 0x86, 0xdb, 0xbf, 0xc7, 0xab, 0xfa, 0xfb, 0x23, + 0xd3, 0xa9, 0x1f, 0xc0, 0x69, 0x1f, 0xc0, 0x79, 0xd3, 0x56, 0x78, 0xbb, 0x8d, 0xe5, 0x8e, 0x14, + 0x57, 0xdf, 0x2d, 0xe0, 0xaf, 0x9a, 0xd1, 0x11, 0xd4, 0xe3, 0x28, 0x6d, 0xf4, 0xbc, 0xad, 0x65, + 0x61, 0xa9, 0xd8, 0x57, 0x27, 0xca, 0x20, 0x12, 0x92, 0xe7, 0x67, 0x32, 0xe7, 0x74, 0xfa, 0x8a, + 0x4a, 0x32, 0x25, 0x92, 0x18, 0x5d, 0x35, 0x1f, 0x73, 0x35, 0x9f, 0xbb, 0x7f, 0xcd, 0x7b, 0xdc, + 0x08, 0x1e, 0xfd, 0xdd, 0xfd, 0x84, 0x25, 0x91, 0xa4, 0x49, 0x26, 0x2f, 0xfd, 0x7f, 0x70, 0xa3, + 0x13, 0xd8, 0xcb, 0x08, 0x17, 0x74, 0x6a, 0xe8, 0xff, 0x55, 0x31, 0x1a, 0x95, 0xbd, 0xba, 0x63, + 0x8d, 0xb9, 0xe1, 0x18, 0x8d, 0x61, 0xaf, 0x5a, 0x0d, 0xca, 0xd1, 0x73, 0xa8, 0x57, 0x08, 0x1d, + 0xac, 0xf8, 0xd6, 0xb6, 0xd1, 0x3c, 0xbc, 0x9b, 0x6e, 0x76, 0x49, 0xf3, 0xde, 0xce, 0xe6, 0x58, + 0xbb, 0x99, 0x63, 0xed, 0x76, 0x8e, 0xc1, 0xa7, 0x12, 0x83, 0x2f, 0x25, 0x06, 0xd7, 0x25, 0x06, + 0xb3, 0x12, 0x83, 0x1f, 0x25, 0x06, 0x3f, 0x4b, 0xac, 0xdd, 0x96, 0x18, 0x5c, 0x2d, 0xb0, 0x36, + 0x5b, 0x60, 0xed, 0x66, 0x81, 0xb5, 0xf7, 0x83, 0x20, 0x92, 0x61, 0x3e, 0x71, 0xce, 0x58, 0xe2, + 0x06, 0x9c, 0x9c, 0x93, 0x94, 0xb8, 0x31, 0xbb, 0x88, 0xdc, 0xf6, 0xd3, 
0x9a, 0xf4, 0x94, 0xda, + 0xb3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xaa, 0x57, 0xd3, 0x6d, 0x03, 0x00, 0x00, } func (this *PushRequest) Equal(that interface{}) bool { @@ -465,6 +477,14 @@ func (this *EntryAdapter) Equal(that interface{}) bool { return false } } + if len(this.Parsed) != len(that1.Parsed) { + return false + } + for i := range this.Parsed { + if !this.Parsed[i].Equal(&that1.Parsed[i]) { + return false + } + } return true } func (this *PushRequest) GoString() string { @@ -519,7 +539,7 @@ func (this *EntryAdapter) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&push.EntryAdapter{") s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") s = append(s, "Line: "+fmt.Sprintf("%#v", this.Line)+",\n") @@ -530,6 +550,13 @@ func (this *EntryAdapter) GoString() string { } s = append(s, "StructuredMetadata: "+fmt.Sprintf("%#v", vs)+",\n") } + if this.Parsed != nil { + vs := make([]*LabelPairAdapter, len(this.Parsed)) + for i := range vs { + vs[i] = &this.Parsed[i] + } + s = append(s, "Parsed: "+fmt.Sprintf("%#v", vs)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -788,6 +815,20 @@ func (m *EntryAdapter) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Parsed) > 0 { + for iNdEx := len(m.Parsed) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parsed[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPush(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.StructuredMetadata) > 0 { for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- { { @@ -912,6 +953,12 @@ func (m *EntryAdapter) Size() (n int) { n += 1 + l + sovPush(uint64(l)) } } + if len(m.Parsed) > 0 { + for _, e := range m.Parsed { + l = e.Size() + n += 1 + l + sovPush(uint64(l)) + } + } return n } @@ -977,10 +1024,16 @@ func (this *EntryAdapter) String() string { repeatedStringForStructuredMetadata += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + "," } repeatedStringForStructuredMetadata += "}" + repeatedStringForParsed := "[]LabelPairAdapter{" + for _, f := range this.Parsed { + repeatedStringForParsed += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + "," + } + repeatedStringForParsed += "}" s := strings.Join([]string{`&EntryAdapter{`, `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Line:` + fmt.Sprintf("%v", this.Line) + `,`, `StructuredMetadata:` + repeatedStringForStructuredMetadata + `,`, + `Parsed:` + repeatedStringForParsed + `,`, `}`, }, "") return s @@ -1516,6 +1569,40 @@ func (m *EntryAdapter) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parsed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPush + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPush + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPush + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parsed = append(m.Parsed, LabelPairAdapter{}) + if err := 
m.Parsed[len(m.Parsed)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPush(dAtA[iNdEx:]) diff --git a/vendor/github.com/grafana/loki/pkg/push/push.proto b/vendor/github.com/grafana/loki/pkg/push/push.proto index 08552a54d3ae5..3bf8ad06a8a83 100644 --- a/vendor/github.com/grafana/loki/pkg/push/push.proto +++ b/vendor/github.com/grafana/loki/pkg/push/push.proto @@ -46,4 +46,11 @@ message EntryAdapter { (gogoproto.nullable) = false, (gogoproto.jsontag) = "structuredMetadata,omitempty" ]; + // This field shouldn't be used by clients to push data to Loki. + // It is only used by Loki to return parsed log lines in query responses. + // TODO: Remove this field from the write path Proto. + repeated LabelPairAdapter parsed = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "parsed,omitempty" + ]; } diff --git a/vendor/github.com/grafana/loki/pkg/push/types.go b/vendor/github.com/grafana/loki/pkg/push/types.go index ba37d074f776b..d0fc6d6cb8358 100644 --- a/vendor/github.com/grafana/loki/pkg/push/types.go +++ b/vendor/github.com/grafana/loki/pkg/push/types.go @@ -25,12 +25,38 @@ type Entry struct { Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"` Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"` StructuredMetadata LabelsAdapter `protobuf:"bytes,3,opt,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"` + Parsed LabelsAdapter `protobuf:"bytes,4,opt,name=parsed,proto3" json:"parsed,omitempty"` +} + +// MarshalJSON implements json.Marshaler. +// In Loki, this method should only be used by the +// Legacy encoder used when hitting the deprecated /api/promt/query endpoint. +// We will ignore the categorized labels and only return the stream labels. +func (m *Stream) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Labels string `json:"labels"` + Entries []Entry `json:"entries"` + }{ + Labels: m.Labels, + Entries: m.Entries, + }) +} + +// MarshalJSON implements json.Marshaler. +// In Loki, this method should only be used by the +// Legacy encoder used when hitting the deprecated /api/promt/query endpoint. +// We will ignore the structured metadata. +func (m *Entry) MarshalJSON() ([]byte, error) { + type raw Entry + e := raw(*m) + e.StructuredMetadata = nil + return json.Marshal(e) } // LabelAdapter should be a copy of the Prometheus labels.Label type. // We cannot import Prometheus in this package because it would create many dependencies // in other projects importing this package. Instead, we copy the definition here, which should -// be kept in sync with the original so it can be casted to the prometheus type. +// be kept in sync with the original, so it can be cast to the prometheus type. 
type LabelAdapter struct { Name, Value string } @@ -172,6 +198,20 @@ func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Parsed) > 0 { + for iNdEx := len(m.Parsed) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parsed[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPush(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.StructuredMetadata) > 0 { for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- { { @@ -471,6 +511,40 @@ func (m *Entry) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parsed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPush + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPush + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPush + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parsed = append(m.Parsed, LabelAdapter{}) + if err := m.Parsed[len(m.Parsed)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPush(dAtA[iNdEx:]) @@ -661,6 +735,12 @@ func (m *Entry) Size() (n int) { n += 1 + l + sovPush(uint64(l)) } } + if len(m.Parsed) > 0 { + for _, e := range m.Parsed { + l = e.Size() + n += 1 + l + sovPush(uint64(l)) + } + } return n } @@ -711,7 +791,10 @@ func (m *Stream) Equal(that interface{}) bool { return false } } - return m.Hash == that1.Hash + if m.Hash != that1.Hash { + return false + } + return true } func (m *Entry) Equal(that interface{}) bool { @@ -739,11 +822,22 @@ func (m *Entry) Equal(that interface{}) bool { if m.Line != that1.Line { return false } + if len(m.StructuredMetadata) != len(that1.StructuredMetadata) { + return false + } for i := range m.StructuredMetadata { if !m.StructuredMetadata[i].Equal(that1.StructuredMetadata[i]) { return false } } + if len(m.Parsed) != len(that1.Parsed) { + return false + } + for i := range m.Parsed { + if !m.Parsed[i].Equal(that1.Parsed[i]) { + return false + } + } return true } diff --git a/vendor/modules.txt b/vendor/modules.txt index 2689b10acdd00..24ea181a4b1b9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -903,7 +903,7 @@ github.com/grafana/go-gelf/v2/gelf # github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache -# github.com/grafana/loki/pkg/push v0.0.0-20231017172654-cfc4f0e84adc => ./pkg/push +# github.com/grafana/loki/pkg/push v0.0.0-20231023154132-0a7737e7c7eb => ./pkg/push ## explicit; go 1.19 github.com/grafana/loki/pkg/push # github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd => github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd From 5def37d63d8a4279b9f26d4bc62bd04077e42814 Mon Sep 17 00:00:00 2001 From: Periklis Tsirakidis Date: Wed, 25 Oct 2023 15:20:10 +0200 Subject: [PATCH 20/33] operator: Prepare community release v0.5.0 (#10996) --- operator/CHANGELOG.md | 2 ++ operator/Makefile | 2 +- ...oller-manager-metrics-service_v1_service.yaml | 4 ++-- ...oki-operator-manager-config_v1_configmap.yaml | 4 ++-- ..._monitoring.coreos.com_v1_servicemonitor.yaml | 4 ++-- 
...rbac.authorization.k8s.io_v1_clusterrole.yaml | 4 ++-- ...etheus_rbac.authorization.k8s.io_v1_role.yaml | 4 ++-- ...rbac.authorization.k8s.io_v1_rolebinding.yaml | 4 ++-- ...loki-operator-webhook-service_v1_service.yaml | 4 ++-- .../loki-operator.clusterserviceversion.yaml | 16 ++++++++-------- .../loki.grafana.com_alertingrules.yaml | 4 ++-- .../manifests/loki.grafana.com_lokistacks.yaml | 4 ++-- .../loki.grafana.com_recordingrules.yaml | 4 ++-- .../manifests/loki.grafana.com_rulerconfigs.yaml | 4 ++-- ...oller-manager-metrics-service_v1_service.yaml | 4 ++-- ...oki-operator-manager-config_v1_configmap.yaml | 4 ++-- ...rbac.authorization.k8s.io_v1_clusterrole.yaml | 4 ++-- ...etheus_rbac.authorization.k8s.io_v1_role.yaml | 4 ++-- ...rbac.authorization.k8s.io_v1_rolebinding.yaml | 4 ++-- ...loki-operator-webhook-service_v1_service.yaml | 4 ++-- .../loki-operator.clusterserviceversion.yaml | 16 ++++++++-------- .../loki.grafana.com_alertingrules.yaml | 4 ++-- .../manifests/loki.grafana.com_lokistacks.yaml | 4 ++-- .../loki.grafana.com_recordingrules.yaml | 4 ++-- .../manifests/loki.grafana.com_rulerconfigs.yaml | 4 ++-- .../loki-operator.clusterserviceversion.yaml | 2 +- .../loki-operator.clusterserviceversion.yaml | 4 ++-- .../loki-operator.clusterserviceversion.yaml | 4 ++-- .../community-openshift/kustomization.yaml | 6 +++--- .../config/overlays/community/kustomization.yaml | 6 +++--- 30 files changed, 72 insertions(+), 70 deletions(-) diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 6d86c1bcadf77..e907413eeba13 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,7 @@ ## Main +## 0.5.0 (2023-10-24) + - [10924](https://github.com/grafana/loki/pull/10924) **periklis**: Update Loki operand to v2.9.2 - [10874](https://github.com/grafana/loki/pull/10874) **periklis**: Bump deps to address CVE-2023-39325 and CVE-2023-44487 - [10854](https://github.com/grafana/loki/pull/10854) **periklis**: Add missing marker/sweeper panels in retention dashboard diff --git a/operator/Makefile b/operator/Makefile index 88a35068364f5..680b4f509ca3c 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -21,7 +21,7 @@ LOKI_OPERATOR_NS ?= kubernetes-operators # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.4.0 +VERSION ?= 0.5.0 CHANNELS ?= "alpha" DEFAULT_CHANNEL ?= "alpha" SUPPORTED_OCP_VERSIONS="v4.12" diff --git a/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml index 7101ca5af48a5..6f5c94cffea84 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml @@ -5,11 +5,11 @@ metadata: service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + 
app.kubernetes.io/version: 0.5.0 name: loki-operator-controller-manager-metrics-service spec: ports: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml index a18569f871c59..d90b7551218d1 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-manager-config_v1_configmap.yaml @@ -60,9 +60,9 @@ data: kind: ConfigMap metadata: labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-manager-config diff --git a/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml b/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml index 97128e4175d9b..7f5c401e1c073 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml @@ -2,11 +2,11 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator name: loki-operator-metrics-monitor spec: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml index 96738fd625cc7..ed266cabf3925 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -3,11 +3,11 @@ kind: ClusterRole metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-metrics-reader rules: - nonResourceURLs: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml index 5472ebb497895..f427ef94b46d0 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml @@ -6,11 +6,11 @@ metadata: include.release.openshift.io/single-node-developer: 
"true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus rules: - apiGroups: diff --git a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml index d37772946f2d7..ac051dc2b594a 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml @@ -6,11 +6,11 @@ metadata: include.release.openshift.io/single-node-developer: "true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml index 5979829a9e38c..94d97b4c3339d 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator-webhook-service_v1_service.yaml @@ -3,11 +3,11 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-webhook-service spec: ports: diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index 9ea06a23e299b..4d70fb9e082b4 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -149,8 +149,8 @@ metadata: capabilities: Full Lifecycle categories: OpenShift Optional, Logging & Tracing certified: "false" - containerImage: docker.io/grafana/loki-operator:0.4.0 - createdAt: "2023-10-17T07:40:29Z" + containerImage: docker.io/grafana/loki-operator:0.5.0 + createdAt: "2023-10-23T07:39:01Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. 
operators.operatorframework.io/builder: operator-sdk-unknown @@ -160,7 +160,7 @@ metadata: labels: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported - name: loki-operator.v0.4.0 + name: loki-operator.v0.5.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -1623,11 +1623,11 @@ spec: serviceAccountName: default deployments: - label: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 control-plane: controller-manager name: loki-operator-controller-manager spec: @@ -1661,7 +1661,7 @@ spec: value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA value: quay.io/observatorium/opa-openshift:latest - image: docker.io/grafana/loki-operator:0.4.0 + image: docker.io/grafana/loki-operator:0.5.0 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -1785,8 +1785,8 @@ spec: name: gateway - image: quay.io/observatorium/opa-openshift:latest name: opa - replaces: loki-operator.v0.3.0 - version: 0.4.0 + replaces: loki-operator.v0.4.0 + version: 0.5.0 webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml index e8ebdc32528f7..cbff5d853c27b 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_alertingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: alertingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml index 86c36c4eb88b4..1a4120613e358 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: lokistacks.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml index 2a827b65e0c63..0d157b3359cb6 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_recordingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - 
app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: recordingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml index a218c6e076c0e..7c90da29b19bc 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: rulerconfigs.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml index b7f50405c453f..eb601bf525c4d 100644 --- a/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml +++ b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml @@ -3,11 +3,11 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-controller-manager-metrics-service spec: ports: diff --git a/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml index a5e6538f4f1f7..487a72b6e3ad1 100644 --- a/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml +++ b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml @@ -24,9 +24,9 @@ data: kind: ConfigMap metadata: labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-manager-config diff --git a/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml index 96738fd625cc7..ed266cabf3925 100644 --- a/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml +++ b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -3,11 +3,11 @@ kind: ClusterRole metadata: creationTimestamp: null labels: - 
app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-metrics-reader rules: - nonResourceURLs: diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml index 5472ebb497895..f427ef94b46d0 100644 --- a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml +++ b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml @@ -6,11 +6,11 @@ metadata: include.release.openshift.io/single-node-developer: "true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus rules: - apiGroups: diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml index d37772946f2d7..ac051dc2b594a 100644 --- a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml +++ b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml @@ -6,11 +6,11 @@ metadata: include.release.openshift.io/single-node-developer: "true" creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-prometheus roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml index 5979829a9e38c..94d97b4c3339d 100644 --- a/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml +++ b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml @@ -3,11 +3,11 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: loki-operator-webhook-service spec: ports: diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index e65f2a081488f..3b989f498e5ab 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -149,8 +149,8 @@ metadata: 
capabilities: Full Lifecycle categories: OpenShift Optional, Logging & Tracing certified: "false" - containerImage: docker.io/grafana/loki-operator:0.4.0 - createdAt: "2023-10-17T07:40:27Z" + containerImage: docker.io/grafana/loki-operator:0.5.0 + createdAt: "2023-10-23T07:38:57Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown @@ -160,7 +160,7 @@ metadata: labels: operatorframework.io/arch.amd64: supported operatorframework.io/arch.arm64: supported - name: loki-operator.v0.4.0 + name: loki-operator.v0.5.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -1610,11 +1610,11 @@ spec: serviceAccountName: default deployments: - label: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 control-plane: controller-manager name: loki-operator-controller-manager spec: @@ -1648,7 +1648,7 @@ spec: value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA value: quay.io/observatorium/opa-openshift:latest - image: docker.io/grafana/loki-operator:0.4.0 + image: docker.io/grafana/loki-operator:0.5.0 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -1760,8 +1760,8 @@ spec: name: gateway - image: quay.io/observatorium/opa-openshift:latest name: opa - replaces: loki-operator.v0.3.0 - version: 0.4.0 + replaces: loki-operator.v0.4.0 + version: 0.5.0 webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml index dbb6b869602ee..3d6fdd8edc5a2 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: alertingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml index 90ba4f19c5275..3c0f8321ebe60 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: lokistacks.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml index 
ec5eb9cc61358..ef3be7886f92c 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: recordingrules.loki.grafana.com spec: conversion: diff --git a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml index 689d10a5d6ff4..db77d5805247f 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml @@ -5,11 +5,11 @@ metadata: controller-gen.kubebuilder.io/version: v0.13.0 creationTimestamp: null labels: - app.kubernetes.io/instance: loki-operator-v0.4.0 + app.kubernetes.io/instance: loki-operator-v0.5.0 app.kubernetes.io/managed-by: operator-lifecycle-manager app.kubernetes.io/name: loki-operator app.kubernetes.io/part-of: loki-operator - app.kubernetes.io/version: 0.4.0 + app.kubernetes.io/version: 0.5.0 name: rulerconfigs.loki.grafana.com spec: conversion: diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index f640812464a43..7568b49a76286 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:0.1.0 - createdAt: "2023-10-17T07:40:31Z" + createdAt: "2023-10-23T07:39:06Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. ## Prerequisites and Requirements diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml index 7a4af5c0f016a..91dcd4100b59e 100644 --- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml @@ -6,7 +6,7 @@ metadata: capabilities: Full Lifecycle categories: OpenShift Optional, Logging & Tracing certified: "false" - containerImage: docker.io/grafana/loki-operator:0.4.0 + containerImage: docker.io/grafana/loki-operator:0.5.0 createdAt: "2022-12-22T13:28:40+00:00" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. 
@@ -2218,5 +2218,5 @@ spec:
 minKubeVersion: 1.21.1
 provider:
 name: Grafana Loki SIG Operator
- replaces: loki-operator.v0.3.0
+ replaces: loki-operator.v0.4.0
 version: 0.0.0
diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
index 2d8f27cd01f11..a41a17cba4608 100644
--- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
@@ -6,7 +6,7 @@ metadata:
 capabilities: Full Lifecycle
 categories: OpenShift Optional, Logging & Tracing
 certified: "false"
- containerImage: docker.io/grafana/loki-operator:0.4.0
+ containerImage: docker.io/grafana/loki-operator:0.5.0
 createdAt: "2022-12-22T13:28:40+00:00"
 description: The Community Loki Operator provides Kubernetes native deployment
 and management of Loki and related logging components.
@@ -2205,5 +2205,5 @@ spec:
 minKubeVersion: 1.21.1
 provider:
 name: Grafana Loki SIG Operator
- replaces: loki-operator.v0.3.0
+ replaces: loki-operator.v0.4.0
 version: 0.0.0
diff --git a/operator/config/overlays/community-openshift/kustomization.yaml b/operator/config/overlays/community-openshift/kustomization.yaml
index 2981d73c458dc..957281a019b73 100644
--- a/operator/config/overlays/community-openshift/kustomization.yaml
+++ b/operator/config/overlays/community-openshift/kustomization.yaml
@@ -11,8 +11,8 @@ labels:
 app.kubernetes.io/managed-by: operator-lifecycle-manager
 includeSelectors: true
 - pairs:
- app.kubernetes.io/instance: loki-operator-v0.4.0
- app.kubernetes.io/version: "0.4.0"
+ app.kubernetes.io/instance: loki-operator-v0.5.0
+ app.kubernetes.io/version: "0.5.0"
 configMapGenerator:
 - files:
@@ -27,4 +27,4 @@ patchesStrategicMerge:
 images:
 - name: controller
 newName: docker.io/grafana/loki-operator
- newTag: 0.4.0
+ newTag: 0.5.0
diff --git a/operator/config/overlays/community/kustomization.yaml b/operator/config/overlays/community/kustomization.yaml
index 7aa216f1f7166..144da82d5dc65 100644
--- a/operator/config/overlays/community/kustomization.yaml
+++ b/operator/config/overlays/community/kustomization.yaml
@@ -22,8 +22,8 @@ labels:
 app.kubernetes.io/managed-by: operator-lifecycle-manager
 includeSelectors: true
 - pairs:
- app.kubernetes.io/instance: loki-operator-v0.4.0
- app.kubernetes.io/version: "0.4.0"
+ app.kubernetes.io/instance: loki-operator-v0.5.0
+ app.kubernetes.io/version: "0.5.0"
 generatorOptions:
 disableNameSuffixHash: true
@@ -43,7 +43,7 @@ patchesStrategicMerge:
 images:
 - name: controller
 newName: docker.io/grafana/loki-operator
- newTag: 0.4.0
+ newTag: 0.5.0
 # the following config is for teaching kustomize how to do var substitution
 vars:

From 557b516b665eda3eb2c37d43ba69db65f81a181e Mon Sep 17 00:00:00 2001
From: Callum Styan
Date: Wed, 25 Oct 2023 07:25:06 -0700
Subject: [PATCH 21/33] Refactor the proto Request interface to return time.Time (#11018)

Part of our query path improvements. In addition to Karsten's new protobuf
formats for all the query requests and responses, plus our use of plain proto
gRPC rather than httpgrpc, we'd like to refactor the proto Request interface
to return time.Time for start and end instead of int64. This will allow his
protobuf formats to use more `logproto` proto types. There are still one or
two tests that I need to fix.
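For reviewers, a condensed sketch of the resulting interface (simplified: the
real `Request` interface in `queryrangebase/definitions` also carries
`GetStep`, `GetQuery`, `GetCachingOptions`, `WithQuery`, `LogToSpan`, and
`proto.Message`, omitted here for brevity):

```go
package definitions

import "time"

// Request previously exposed the query time range as Unix-millisecond
// integers:
//
//	GetStart() int64
//	GetEnd() int64
//	WithStartEnd(startTime int64, endTime int64) Request
//
// After this change it returns time.Time directly, so middlewares stop
// converting to and from milliseconds at every call site.
type Request interface {
	// GetStart returns the start timestamp of the request.
	GetStart() time.Time
	// GetEnd returns the end timestamp of the request.
	GetEnd() time.Time
	// WithStartEnd clones the request with a different start and end.
	WithStartEnd(start time.Time, end time.Time) Request
}
```

Call sites that used to shift millisecond integers, e.g.
`statsReq.WithStartEnd(statsReq.GetStart()+(15*time.Minute).Milliseconds(), ...)`,
now read `statsReq.WithStartEnd(statsReq.GetStart().Add(15*time.Minute), ...)`,
as in the updated `index_stats_cache_test.go` below.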
--------- Signed-off-by: Callum Styan Co-authored-by: Karsten Jeschkies --- pkg/logproto/compat.go | 36 ++-- pkg/querier/queryrange/codec.go | 74 +++---- pkg/querier/queryrange/index_stats_cache.go | 2 +- .../queryrange/index_stats_cache_test.go | 2 +- pkg/querier/queryrange/limits.go | 29 ++- pkg/querier/queryrange/limits_test.go | 4 +- pkg/querier/queryrange/log_result_cache.go | 2 +- .../queryrangebase/definitions/interface.go | 7 +- .../queryrange/queryrangebase/query_range.go | 100 +-------- .../queryrangebase/query_range_test.go | 65 ------ .../queryrangebase/queryrange.pb.go | 199 ++++++++++-------- .../queryrangebase/queryrange.proto | 11 +- .../queryrangebase/results_cache.go | 30 +-- .../queryrangebase/results_cache_test.go | 158 +++++++------- .../queryrange/queryrangebase/step_align.go | 7 +- .../queryrangebase/step_align_test.go | 17 +- pkg/querier/queryrange/querysharding.go | 10 +- pkg/querier/queryrange/querysharding_test.go | 7 +- pkg/querier/queryrange/roundtrip.go | 12 +- pkg/querier/queryrange/shard_resolver.go | 4 +- pkg/querier/queryrange/split_by_interval.go | 12 +- pkg/querier/queryrange/volume_cache.go | 2 +- pkg/querier/queryrange/volume_cache_test.go | 2 +- 23 files changed, 337 insertions(+), 455 deletions(-) diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go index e195af968731a..8e3d24df800f1 100644 --- a/pkg/logproto/compat.go +++ b/pkg/logproto/compat.go @@ -232,13 +232,13 @@ func MergeSeriesResponses(responses []*SeriesResponse) (*SeriesResponse, error) // Satisfy definitions.Request // GetStart returns the start timestamp of the request in milliseconds. -func (m *IndexStatsRequest) GetStart() int64 { - return int64(m.From) +func (m *IndexStatsRequest) GetStart() time.Time { + return time.Unix(0, m.From.UnixNano()) } // GetEnd returns the end timestamp of the request in milliseconds. -func (m *IndexStatsRequest) GetEnd() int64 { - return int64(m.Through) +func (m *IndexStatsRequest) GetEnd() time.Time { + return time.Unix(0, m.Through.UnixNano()) } // GetStep returns the step of the request in milliseconds. @@ -253,10 +253,10 @@ func (m *IndexStatsRequest) GetQuery() string { func (m *IndexStatsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } // WithStartEnd clone the current request with different start and end timestamp. -func (m *IndexStatsRequest) WithStartEnd(startTime int64, endTime int64) definitions.Request { +func (m *IndexStatsRequest) WithStartEnd(start, end time.Time) definitions.Request { clone := *m - clone.From = model.TimeFromUnixNano(startTime * int64(time.Millisecond)) - clone.Through = model.TimeFromUnixNano(endTime * int64(time.Millisecond)) + clone.From = model.TimeFromUnixNano(start.UnixNano()) + clone.Through = model.TimeFromUnixNano(end.UnixNano()) return &clone } @@ -271,21 +271,21 @@ func (m *IndexStatsRequest) WithQuery(query string) definitions.Request { func (m *IndexStatsRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("query", m.GetQuery()), - otlog.String("start", timestamp.Time(m.GetStart()).String()), - otlog.String("end", timestamp.Time(m.GetEnd()).String()), + otlog.String("start", timestamp.Time(int64(m.From)).String()), + otlog.String("end", timestamp.Time(int64(m.Through)).String()), ) } // Satisfy definitions.Request for Volume // GetStart returns the start timestamp of the request in milliseconds. 
-func (m *VolumeRequest) GetStart() int64 { - return int64(m.From) +func (m *VolumeRequest) GetStart() time.Time { + return time.UnixMilli(int64(m.From)) } // GetEnd returns the end timestamp of the request in milliseconds. -func (m *VolumeRequest) GetEnd() int64 { - return int64(m.Through) +func (m *VolumeRequest) GetEnd() time.Time { + return time.UnixMilli(int64(m.Through)) } // GetQuery returns the query of the request. @@ -297,10 +297,10 @@ func (m *VolumeRequest) GetQuery() string { func (m *VolumeRequest) GetCachingOptions() (res definitions.CachingOptions) { return } // WithStartEnd clone the current request with different start and end timestamp. -func (m *VolumeRequest) WithStartEnd(startTime int64, endTime int64) definitions.Request { +func (m *VolumeRequest) WithStartEnd(start, end time.Time) definitions.Request { clone := *m - clone.From = model.TimeFromUnixNano(startTime * int64(time.Millisecond)) - clone.Through = model.TimeFromUnixNano(endTime * int64(time.Millisecond)) + clone.From = model.TimeFromUnixNano(start.UnixNano()) + clone.Through = model.TimeFromUnixNano(end.UnixNano()) return &clone } @@ -315,8 +315,8 @@ func (m *VolumeRequest) WithQuery(query string) definitions.Request { func (m *VolumeRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("query", m.GetQuery()), - otlog.String("start", timestamp.Time(m.GetStart()).String()), - otlog.String("end", timestamp.Time(m.GetEnd()).String()), + otlog.String("start", timestamp.Time(int64(m.From)).String()), + otlog.String("end", timestamp.Time(int64(m.Through)).String()), ) } diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 03559b2e2f2ef..b24a46146b382 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -46,18 +46,18 @@ type RequestProtobufCodec struct { Codec } -func (r *LokiRequest) GetEnd() int64 { - return r.EndTs.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +func (r *LokiRequest) GetEnd() time.Time { + return r.EndTs } -func (r *LokiRequest) GetStart() int64 { - return r.StartTs.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +func (r *LokiRequest) GetStart() time.Time { + return r.StartTs } -func (r *LokiRequest) WithStartEnd(s int64, e int64) queryrangebase.Request { +func (r *LokiRequest) WithStartEnd(s time.Time, e time.Time) queryrangebase.Request { clone := *r - clone.StartTs = time.Unix(0, s*int64(time.Millisecond)) - clone.EndTs = time.Unix(0, e*int64(time.Millisecond)) + clone.StartTs = s + clone.EndTs = e return &clone } @@ -83,8 +83,8 @@ func (r *LokiRequest) WithShards(shards logql.Shards) *LokiRequest { func (r *LokiRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("query", r.GetQuery()), - otlog.String("start", timestamp.Time(r.GetStart()).String()), - otlog.String("end", timestamp.Time(r.GetEnd()).String()), + otlog.String("start", timestamp.Time(r.GetStart().UnixNano()).String()), + otlog.String("end", timestamp.Time(r.GetEnd().UnixNano()).String()), otlog.Int64("step (ms)", r.GetStep()), otlog.Int64("interval (ms)", r.GetInterval()), otlog.Int64("limit", int64(r.GetLimit())), @@ -99,17 +99,17 @@ func (r *LokiInstantRequest) GetStep() int64 { return 0 } -func (r *LokiInstantRequest) GetEnd() int64 { - return r.TimeTs.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +func (r *LokiInstantRequest) GetEnd() time.Time { + return r.TimeTs } -func (r *LokiInstantRequest) GetStart() int64 { - return r.TimeTs.UnixNano() / (int64(time.Millisecond) / 
int64(time.Nanosecond)) +func (r *LokiInstantRequest) GetStart() time.Time { + return r.TimeTs } -func (r *LokiInstantRequest) WithStartEnd(s int64, _ int64) queryrangebase.Request { +func (r *LokiInstantRequest) WithStartEnd(s time.Time, _ time.Time) queryrangebase.Request { clone := *r - clone.TimeTs = time.Unix(0, s*int64(time.Millisecond)) + clone.TimeTs = s return &clone } @@ -128,7 +128,7 @@ func (r *LokiInstantRequest) WithShards(shards logql.Shards) *LokiInstantRequest func (r *LokiInstantRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("query", r.GetQuery()), - otlog.String("ts", timestamp.Time(r.GetStart()).String()), + otlog.String("ts", timestamp.Time(r.GetStart().UnixMilli()).String()), otlog.Int64("limit", int64(r.GetLimit())), otlog.String("direction", r.GetDirection().String()), otlog.String("shards", strings.Join(r.GetShards(), ",")), @@ -137,18 +137,18 @@ func (r *LokiInstantRequest) LogToSpan(sp opentracing.Span) { func (*LokiInstantRequest) GetCachingOptions() (res queryrangebase.CachingOptions) { return } -func (r *LokiSeriesRequest) GetEnd() int64 { - return r.EndTs.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +func (r *LokiSeriesRequest) GetEnd() time.Time { + return r.EndTs } -func (r *LokiSeriesRequest) GetStart() int64 { - return r.StartTs.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +func (r *LokiSeriesRequest) GetStart() time.Time { + return r.StartTs } -func (r *LokiSeriesRequest) WithStartEnd(s int64, e int64) queryrangebase.Request { +func (r *LokiSeriesRequest) WithStartEnd(s, e time.Time) queryrangebase.Request { clone := *r - clone.StartTs = time.Unix(0, s*int64(time.Millisecond)) - clone.EndTs = time.Unix(0, e*int64(time.Millisecond)) + clone.StartTs = s + clone.EndTs = e return &clone } @@ -168,8 +168,8 @@ func (r *LokiSeriesRequest) GetStep() int64 { func (r *LokiSeriesRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("matchers", strings.Join(r.GetMatch(), ",")), - otlog.String("start", timestamp.Time(r.GetStart()).String()), - otlog.String("end", timestamp.Time(r.GetEnd()).String()), + otlog.String("start", timestamp.Time(r.GetStart().UnixNano()).String()), + otlog.String("end", timestamp.Time(r.GetEnd().UnixNano()).String()), otlog.String("shards", strings.Join(r.GetShards(), ",")), ) } @@ -199,16 +199,16 @@ func (r *LabelRequest) AsProto() *logproto.LabelRequest { return &r.LabelRequest } -func (r *LabelRequest) GetEnd() int64 { - return r.End.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +func (r *LabelRequest) GetEnd() time.Time { + return *r.End } func (r *LabelRequest) GetEndTs() time.Time { return *r.End } -func (r *LabelRequest) GetStart() int64 { - return r.Start.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +func (r *LabelRequest) GetStart() time.Time { + return *r.Start } func (r *LabelRequest) GetStartTs() time.Time { @@ -219,11 +219,11 @@ func (r *LabelRequest) GetStep() int64 { return 0 } -func (r *LabelRequest) WithStartEnd(s int64, e int64) queryrangebase.Request { +func (r *LabelRequest) WithStartEnd(s, e time.Time) queryrangebase.Request { clone := *r - tmp := time.Unix(0, s*int64(time.Millisecond)) + tmp := s clone.Start = &tmp - tmp = time.Unix(0, e*int64(time.Millisecond)) + tmp = e clone.End = &tmp return &clone } @@ -236,8 +236,8 @@ func (r *LabelRequest) WithQuery(query string) queryrangebase.Request { func (r *LabelRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( - otlog.String("start", 
timestamp.Time(r.GetStart()).String()), - otlog.String("end", timestamp.Time(r.GetEnd()).String()), + otlog.String("start", timestamp.Time(r.GetStart().UnixNano()).String()), + otlog.String("end", timestamp.Time(r.GetEnd().UnixNano()).String()), ) } @@ -504,7 +504,7 @@ func (Codec) DecodeHTTPGrpcResponse(r *httpgrpc.HTTPResponse, req queryrangebase return decodeResponseJSONFrom(r.Body, req, headers) } -func (Codec) EncodeHTTPGrpcResponse(ctx context.Context, req *httpgrpc.HTTPRequest, res queryrangebase.Response) (*httpgrpc.HTTPResponse, error) { +func (Codec) EncodeHTTPGrpcResponse(_ context.Context, req *httpgrpc.HTTPRequest, res queryrangebase.Response) (*httpgrpc.HTTPResponse, error) { version := loghttp.GetVersion(req.Url) var buf bytes.Buffer @@ -1341,8 +1341,8 @@ func ParamsFromRequest(req queryrangebase.Request) (logql.Params, error) { Query: r.GetQuery(), Limit: uint32(r.GetLimit()), Step: r.GetStep(), - StartTs: time.UnixMilli(r.GetStart()), - EndTs: time.UnixMilli(r.GetEnd()), + StartTs: time.UnixMilli(r.GetStart().UnixNano()), + EndTs: time.UnixMilli(r.GetEnd().UnixNano()), }, }, nil case *LokiInstantRequest: diff --git a/pkg/querier/queryrange/index_stats_cache.go b/pkg/querier/queryrange/index_stats_cache.go index 27d40db2da5f9..4814394fd47ab 100644 --- a/pkg/querier/queryrange/index_stats_cache.go +++ b/pkg/querier/queryrange/index_stats_cache.go @@ -83,7 +83,7 @@ func shouldCacheStats(ctx context.Context, req queryrangebase.Request, lim Limit maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture) now := statsCacheMiddlewareNowTimeFunc() - return maxCacheFreshness == 0 || model.Time(req.GetEnd()).Before(now.Add(-maxCacheFreshness)), nil + return maxCacheFreshness == 0 || model.Time(req.GetEnd().UnixMilli()).Before(now.Add(-maxCacheFreshness)), nil } func NewIndexStatsCacheMiddleware( diff --git a/pkg/querier/queryrange/index_stats_cache_test.go b/pkg/querier/queryrange/index_stats_cache_test.go index 3024ba60dada4..8c154c36a2495 100644 --- a/pkg/querier/queryrange/index_stats_cache_test.go +++ b/pkg/querier/queryrange/index_stats_cache_test.go @@ -78,7 +78,7 @@ func TestIndexStatsCache(t *testing.T) { // should reuse part of the previous request and issue a new request for the remaining time till end. // The new start time is 15m (i.e. 25%) in the future with regard to the previous request time span. 
*calls = 0 - req := statsReq.WithStartEnd(statsReq.GetStart()+(15*time.Minute).Milliseconds(), statsReq.GetEnd()+(15*time.Minute).Milliseconds()) + req := statsReq.WithStartEnd(statsReq.GetStart().Add(15*time.Minute), statsReq.GetEnd().Add(15*time.Minute)) expectedStats := &IndexStatsResponse{ Response: &logproto.IndexStatsResponse{ Streams: 2, diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go index ddf38d30cd004..82bef4bf958a9 100644 --- a/pkg/querier/queryrange/limits.go +++ b/pkg/querier/queryrange/limits.go @@ -29,7 +29,6 @@ import ( "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/index/stats" - "github.com/grafana/loki/pkg/util" util_log "github.com/grafana/loki/pkg/util/log" "github.com/grafana/loki/pkg/util/spanlogger" "github.com/grafana/loki/pkg/util/validation" @@ -109,7 +108,7 @@ func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r q var currentInterval int64 if denominator := int64(split / time.Millisecond); denominator > 0 { - currentInterval = r.GetStart() / denominator + currentInterval = r.GetStart().UnixMilli() / denominator } if l.transformer != nil { @@ -150,26 +149,26 @@ func (l limitsMiddleware) Do(ctx context.Context, r queryrangebase.Request) (que // Clamp the time range based on the max query lookback. lookbackCapture := func(id string) time.Duration { return l.MaxQueryLookback(ctx, id) } if maxQueryLookback := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, lookbackCapture); maxQueryLookback > 0 { - minStartTime := util.TimeToMillis(time.Now().Add(-maxQueryLookback)) + minStartTime := time.Now().Add(-maxQueryLookback) - if r.GetEnd() < minStartTime { + if r.GetEnd().Before(minStartTime) { // The request is fully outside the allowed range, so we can return an // empty response. level.Debug(log).Log( "msg", "skipping the execution of the query because its time range is before the 'max query lookback' setting", - "reqStart", util.FormatTimeMillis(r.GetStart()), - "redEnd", util.FormatTimeMillis(r.GetEnd()), + "reqStart", r.GetStart().String(), + "redEnd", r.GetEnd().String(), "maxQueryLookback", maxQueryLookback) return NewEmptyResponse(r) } - if r.GetStart() < minStartTime { + if r.GetStart().Before(minStartTime) { // Replace the start time in the request. level.Debug(log).Log( "msg", "the start time of the query has been manipulated because of the 'max query lookback' setting", - "original", util.FormatTimeMillis(r.GetStart()), - "updated", util.FormatTimeMillis(minStartTime)) + "original", r.GetStart().String(), + "updated", minStartTime.String()) r = r.WithStartEnd(minStartTime, r.GetEnd()) } @@ -178,7 +177,7 @@ func (l limitsMiddleware) Do(ctx context.Context, r queryrangebase.Request) (que // Enforce the max query length. 
lengthCapture := func(id string) time.Duration { return l.MaxQueryLength(ctx, id) } if maxQueryLength := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, lengthCapture); maxQueryLength > 0 { - queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart())) + queryLen := timestamp.Time(r.GetEnd().UnixMilli()).Sub(timestamp.Time(r.GetStart().UnixMilli())) if queryLen > maxQueryLength { return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, model.Duration(maxQueryLength)) } @@ -281,7 +280,7 @@ func (q *querySizeLimiter) getBytesReadForRequest(ctx context.Context, r queryra // TODO: Set concurrency dynamically as in shardResolverForConf? start := time.Now() const maxConcurrentIndexReq = 10 - matcherStats, err := getStatsForMatchers(ctx, q.logger, q.statsHandler, model.Time(r.GetStart()), model.Time(r.GetEnd()), matcherGroups, maxConcurrentIndexReq, q.maxLookBackPeriod) + matcherStats, err := getStatsForMatchers(ctx, q.logger, q.statsHandler, model.Time(r.GetStart().UnixMilli()), model.Time(r.GetEnd().UnixMilli()), matcherGroups, maxConcurrentIndexReq, q.maxLookBackPeriod) if err != nil { return 0, err } @@ -309,8 +308,8 @@ func (q *querySizeLimiter) getSchemaCfg(r queryrangebase.Request) (config.Period return config.PeriodConfig{}, errors.New("failed to get range-vector and offset duration: " + err.Error()) } - adjustedStart := int64(model.Time(r.GetStart()).Add(-maxRVDuration).Add(-maxOffset)) - adjustedEnd := int64(model.Time(r.GetEnd()).Add(-maxOffset)) + adjustedStart := int64(model.Time(r.GetStart().UnixMilli()).Add(-maxRVDuration).Add(-maxOffset)) + adjustedEnd := int64(model.Time(r.GetEnd().UnixMilli()).Add(-maxOffset)) return ShardingConfigs(q.cfg).ValidRange(adjustedStart, adjustedEnd) } @@ -474,8 +473,8 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque tenantIDs, rt.configs, rt.limits, - model.Time(request.GetStart()), - model.Time(request.GetEnd()), + model.Time(request.GetStart().UnixMilli()), + model.Time(request.GetEnd().UnixMilli()), ) if parallelism < 1 { diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index 02c3862dd45a6..4ab81ec4acd3b 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -43,7 +43,7 @@ func TestLimits(t *testing.T) { require.Equal( t, - fmt.Sprintf("%s:%s:%d:%d:%d", "a", r.GetQuery(), r.GetStep(), r.GetStart()/int64(time.Hour/time.Millisecond), int64(time.Hour)), + fmt.Sprintf("%s:%s:%d:%d:%d", "a", r.GetQuery(), r.GetStep(), r.GetStart().UnixMilli()/int64(time.Hour/time.Millisecond), int64(time.Hour)), cacheKeyLimits{wrapped, nil}.GenerateCacheKey(context.Background(), "a", r), ) } @@ -580,7 +580,7 @@ func Test_MaxQuerySize_MaxLookBackPeriod(t *testing.T) { statsHandler := base.HandlerFunc(func(_ context.Context, req base.Request) (base.Response, error) { // This is the actual check that we're testing. 
- require.Equal(t, testTime.Add(-engineOpts.MaxLookBackPeriod).UnixMilli(), req.GetStart()) + require.Equal(t, testTime.Add(-engineOpts.MaxLookBackPeriod).UnixMilli(), req.GetStart().UnixMilli()) return &IndexStatsResponse{ Response: &logproto.IndexStatsResponse{ diff --git a/pkg/querier/queryrange/log_result_cache.go b/pkg/querier/queryrange/log_result_cache.go index 3f0555709b9e7..a83041a94cd5b 100644 --- a/pkg/querier/queryrange/log_result_cache.go +++ b/pkg/querier/queryrange/log_result_cache.go @@ -95,7 +95,7 @@ func (l *logResultCache) Do(ctx context.Context, req queryrangebase.Request) (qu cacheFreshnessCapture := func(id string) time.Duration { return l.limits.MaxCacheFreshness(ctx, id) } maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture) maxCacheTime := int64(model.Now().Add(-maxCacheFreshness)) - if req.GetEnd() > maxCacheTime { + if req.GetEnd().UnixMilli() > maxCacheTime { return l.next.Do(ctx, req) } diff --git a/pkg/querier/queryrange/queryrangebase/definitions/interface.go b/pkg/querier/queryrange/queryrangebase/definitions/interface.go index dbb350a9c7ba4..fb385817b5ba6 100644 --- a/pkg/querier/queryrange/queryrangebase/definitions/interface.go +++ b/pkg/querier/queryrange/queryrangebase/definitions/interface.go @@ -3,6 +3,7 @@ package definitions import ( "context" "net/http" + "time" "github.com/gogo/protobuf/proto" "github.com/opentracing/opentracing-go" @@ -32,9 +33,9 @@ type Merger interface { // Request represents a query range request that can be process by middlewares. type Request interface { // GetStart returns the start timestamp of the request in milliseconds. - GetStart() int64 + GetStart() time.Time // GetEnd returns the end timestamp of the request in milliseconds. - GetEnd() int64 + GetEnd() time.Time // GetStep returns the step of the request in milliseconds. GetStep() int64 // GetQuery returns the query of the request. @@ -42,7 +43,7 @@ type Request interface { // GetCachingOptions returns the caching options. GetCachingOptions() CachingOptions // WithStartEnd clone the current request with different start and end timestamp. - WithStartEnd(startTime int64, endTime int64) Request + WithStartEnd(start time.Time, end time.Time) Request // WithQuery clone the current request with a different query. WithQuery(string) Request proto.Message diff --git a/pkg/querier/queryrange/queryrangebase/query_range.go b/pkg/querier/queryrange/queryrangebase/query_range.go index 70fe36dfc08f0..4c7426b1714b1 100644 --- a/pkg/querier/queryrange/queryrangebase/query_range.go +++ b/pkg/querier/queryrange/queryrangebase/query_range.go @@ -7,10 +7,8 @@ import ( "io" "math" "net/http" - "net/url" "sort" "strconv" - "strings" "time" "github.com/gogo/status" @@ -22,7 +20,6 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/spanlogger" ) @@ -41,7 +38,7 @@ var ( errStepTooSmall = httpgrpc.Errorf(http.StatusBadRequest, "exceeded maximum resolution of 11,000 points per time series. Try increasing the value of the step parameter") // PrometheusCodec is a codec to encode and decode Prometheus query range requests and responses. - PrometheusCodec Codec = &prometheusCodec{} + PrometheusCodec = &prometheusCodec{} // Name of the cache control header. 
cacheControlHeader = "Cache-Control" @@ -50,7 +47,7 @@ var ( type prometheusCodec struct{} // WithStartEnd clones the current `PrometheusRequest` with a new `start` and `end` timestamp. -func (q *PrometheusRequest) WithStartEnd(start int64, end int64) Request { +func (q *PrometheusRequest) WithStartEnd(start, end time.Time) Request { clone := *q clone.Start = start clone.End = end @@ -68,8 +65,8 @@ func (q *PrometheusRequest) WithQuery(query string) Request { func (q *PrometheusRequest) LogToSpan(sp opentracing.Span) { sp.LogFields( otlog.String("query", q.GetQuery()), - otlog.String("start", timestamp.Time(q.GetStart()).String()), - otlog.String("end", timestamp.Time(q.GetEnd()).String()), + otlog.String("start", timestamp.Time(q.GetStart().UnixMilli()).String()), + otlog.String("end", timestamp.Time(q.GetEnd().UnixMilli()).String()), otlog.Int64("step (ms)", q.GetStep()), ) } @@ -137,95 +134,6 @@ func (prometheusCodec) MergeResponse(responses ...Response) (Response, error) { return &response, nil } -func (prometheusCodec) DecodeRequest(_ context.Context, r *http.Request, forwardHeaders []string) (Request, error) { - var result PrometheusRequest - var err error - result.Start, err = util.ParseTime(r.FormValue("start")) - if err != nil { - return nil, decorateWithParamName(err, "start") - } - - result.End, err = util.ParseTime(r.FormValue("end")) - if err != nil { - return nil, decorateWithParamName(err, "end") - } - - if result.End < result.Start { - return nil, errEndBeforeStart - } - - result.Step, err = parseDurationMs(r.FormValue("step")) - if err != nil { - return nil, decorateWithParamName(err, "step") - } - - if result.Step <= 0 { - return nil, errNegativeStep - } - - // For safety, limit the number of returned points per timeseries. - // This is sufficient for 60s resolution for a week or 1h resolution for a year. - if (result.End-result.Start)/result.Step > 11000 { - return nil, errStepTooSmall - } - - result.Query = r.FormValue("query") - result.Path = r.URL.Path - - // Include the specified headers from http request in prometheusRequest. - for _, header := range forwardHeaders { - for h, hv := range r.Header { - if strings.EqualFold(h, header) { - result.Headers = append(result.Headers, &PrometheusRequestHeader{Name: h, Values: hv}) - break - } - } - } - - for _, value := range r.Header.Values(cacheControlHeader) { - if strings.Contains(value, noStoreValue) { - result.CachingOptions.Disabled = true - break - } - } - - return &result, nil -} - -func (prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Request, error) { - promReq, ok := r.(*PrometheusRequest) - if !ok { - return nil, httpgrpc.Errorf(http.StatusBadRequest, "invalid request format") - } - params := url.Values{ - "start": []string{encodeTime(promReq.Start)}, - "end": []string{encodeTime(promReq.End)}, - "step": []string{encodeDurationMs(promReq.Step)}, - "query": []string{promReq.Query}, - } - u := &url.URL{ - Path: promReq.Path, - RawQuery: params.Encode(), - } - var h = http.Header{} - - for _, hv := range promReq.Headers { - for _, v := range hv.Values { - h.Add(hv.Name, v) - } - } - - req := &http.Request{ - Method: "GET", - RequestURI: u.String(), // This is what the httpgrpc code looks at. 
- URL: u, - Body: http.NoBody, - Header: h, - } - - return req.WithContext(ctx), nil -} - func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ Request) (Response, error) { if r.StatusCode/100 != 2 { body, _ := io.ReadAll(r.Body) diff --git a/pkg/querier/queryrange/queryrangebase/query_range_test.go b/pkg/querier/queryrange/queryrangebase/query_range_test.go index ed46ea66adfd9..21c115eec5892 100644 --- a/pkg/querier/queryrange/queryrangebase/query_range_test.go +++ b/pkg/querier/queryrange/queryrangebase/query_range_test.go @@ -8,8 +8,6 @@ import ( "strconv" "testing" - "github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/user" jsoniter "github.com/json-iterator/go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,69 +15,6 @@ import ( "github.com/grafana/loki/pkg/logproto" ) -func TestRequest(t *testing.T) { - // Create a Copy parsedRequest to assign the expected headers to the request without affecting other tests using the global. - // The test below adds a Test-Header header to the request and expects it back once the encode/decode of request is done via PrometheusCodec - parsedRequestWithHeaders := *parsedRequest - parsedRequestWithHeaders.Headers = reqHeaders - for i, tc := range []struct { - url string - expected Request - expectedErr error - }{ - { - url: query, - expected: &parsedRequestWithHeaders, - }, - { - url: "api/v1/query_range?start=foo", - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, "invalid parameter \"start\"; cannot parse \"foo\" to a valid timestamp"), - }, - { - url: "api/v1/query_range?start=123&end=bar", - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, "invalid parameter \"end\"; cannot parse \"bar\" to a valid timestamp"), - }, - { - url: "api/v1/query_range?start=123&end=0", - expectedErr: errEndBeforeStart, - }, - { - url: "api/v1/query_range?start=123&end=456&step=baz", - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, "invalid parameter \"step\"; cannot parse \"baz\" to a valid duration"), - }, - { - url: "api/v1/query_range?start=123&end=456&step=-1", - expectedErr: errNegativeStep, - }, - { - url: "api/v1/query_range?start=0&end=11001&step=1", - expectedErr: errStepTooSmall, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - r, err := http.NewRequest("GET", tc.url, nil) - require.NoError(t, err) - r.Header.Add("Test-Header", "test") - - ctx := user.InjectOrgID(context.Background(), "1") - - // Get a deep copy of the request with Context changed to ctx - r = r.Clone(ctx) - - req, err := PrometheusCodec.DecodeRequest(ctx, r, []string{"Test-Header"}) - if err != nil { - require.EqualValues(t, tc.expectedErr, err) - return - } - require.EqualValues(t, tc.expected, req) - - rdash, err := PrometheusCodec.EncodeRequest(context.Background(), req) - require.NoError(t, err) - require.EqualValues(t, tc.url, rdash.RequestURI) - }) - } -} - func TestResponse(t *testing.T) { r := *parsedResponse r.Headers = respHeaders diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go index da0a14e0fdb2e..121b3ffb15351 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go @@ -35,8 +35,8 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type PrometheusRequest struct { Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Start int64 
`protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` - End int64 `protobuf:"varint,3,opt,name=end,proto3" json:"end,omitempty"` + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` Timeout time.Duration `protobuf:"bytes,5,opt,name=timeout,proto3,stdduration" json:"timeout"` Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"` @@ -83,18 +83,18 @@ func (m *PrometheusRequest) GetPath() string { return "" } -func (m *PrometheusRequest) GetStart() int64 { +func (m *PrometheusRequest) GetStart() time.Time { if m != nil { return m.Start } - return 0 + return time.Time{} } -func (m *PrometheusRequest) GetEnd() int64 { +func (m *PrometheusRequest) GetEnd() time.Time { if m != nil { return m.End } - return 0 + return time.Time{} } func (m *PrometheusRequest) GetStep() int64 { @@ -435,59 +435,60 @@ func init() { } var fileDescriptor_4cc6a0c1d6b614c4 = []byte{ - // 821 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4f, 0x6f, 0xdc, 0x44, - 0x14, 0xdf, 0xc9, 0xfe, 0x9f, 0x54, 0xdb, 0x32, 0x8d, 0x8a, 0xd3, 0x22, 0x7b, 0xb5, 0x02, 0x29, - 0x48, 0xe0, 0x15, 0x41, 0x70, 0x2b, 0x22, 0x4e, 0x82, 0x68, 0x55, 0x89, 0x6a, 0xc2, 0x89, 0x0b, - 0x9a, 0x5d, 0xbf, 0x38, 0x56, 0xfc, 0xaf, 0x33, 0xe3, 0x8a, 0xbd, 0x71, 0xe2, 0xcc, 0x91, 0x8f, - 0xc0, 0x01, 0xf1, 0x39, 0x22, 0x4e, 0x39, 0x56, 0x1c, 0x0c, 0xd9, 0x5c, 0x90, 0x4f, 0xfd, 0x08, - 0x68, 0x66, 0xec, 0x8d, 0x77, 0xab, 0x40, 0x2f, 0xbb, 0xef, 0xcf, 0xef, 0xbd, 0x79, 0xbf, 0xdf, - 0x8c, 0x1f, 0xfe, 0x3c, 0x3b, 0x0f, 0xa6, 0x2f, 0x72, 0xe0, 0x21, 0x70, 0xfd, 0xbf, 0xe0, 0x2c, - 0x09, 0xa0, 0x61, 0xce, 0x98, 0x68, 0xba, 0x6e, 0xc6, 0x53, 0x99, 0x92, 0xd1, 0x3a, 0xe0, 0xe1, - 0x4e, 0x90, 0x06, 0xa9, 0x4e, 0x4d, 0x95, 0x65, 0x50, 0x0f, 0x77, 0x83, 0x34, 0x0d, 0x22, 0x98, - 0x6a, 0x6f, 0x96, 0x9f, 0x4e, 0x59, 0xb2, 0xa8, 0x52, 0xf6, 0x66, 0xca, 0xcf, 0x39, 0x93, 0x61, - 0x9a, 0x54, 0xf9, 0x47, 0x6a, 0xb0, 0x28, 0x0d, 0x4c, 0xcf, 0xda, 0xa8, 0x92, 0x87, 0x6f, 0x37, - 0xb5, 0x0f, 0xa7, 0x61, 0x12, 0xaa, 0xa6, 0xa2, 0x69, 0x9b, 0x26, 0x93, 0x3f, 0xb6, 0xf0, 0x3b, - 0xcf, 0x79, 0x1a, 0x83, 0x3c, 0x83, 0x5c, 0x50, 0x78, 0x91, 0x83, 0x90, 0x84, 0xe0, 0x4e, 0xc6, - 0xe4, 0x99, 0x85, 0xc6, 0x68, 0x6f, 0x48, 0xb5, 0x4d, 0x76, 0x70, 0x57, 0x48, 0xc6, 0xa5, 0xb5, - 0x35, 0x46, 0x7b, 0x6d, 0x6a, 0x1c, 0x72, 0x0f, 0xb7, 0x21, 0xf1, 0xad, 0xb6, 0x8e, 0x29, 0x53, - 0xd5, 0x0a, 0x09, 0x99, 0xd5, 0xd1, 0x21, 0x6d, 0x93, 0xc7, 0xb8, 0x2f, 0xc3, 0x18, 0xd2, 0x5c, - 0x5a, 0xdd, 0x31, 0xda, 0xdb, 0xde, 0xdf, 0x75, 0x0d, 0x73, 0xb7, 0x66, 0xee, 0x1e, 0x55, 0xcc, - 0xbd, 0xc1, 0x45, 0xe1, 0xb4, 0x7e, 0xf9, 0xcb, 0x41, 0xb4, 0xae, 0x51, 0x47, 0x6b, 0x52, 0x56, - 0x4f, 0xcf, 0x63, 0x1c, 0xf2, 0x04, 0x8f, 0xe6, 0x6c, 0x7e, 0x16, 0x26, 0xc1, 0x37, 0x99, 0xa6, - 0x64, 0xf5, 0x75, 0xef, 0x47, 0x6e, 0x93, 0xe6, 0xe1, 0x1a, 0xc4, 0xeb, 0xa8, 0xee, 0x74, 0xa3, - 0x90, 0x1c, 0xe3, 0xfe, 0xd7, 0xc0, 0x7c, 0xe0, 0xc2, 0x1a, 0x8c, 0xdb, 0x7b, 0xdb, 0xfb, 0xef, - 0xaf, 0xf5, 0x78, 0x43, 0x20, 0x03, 0xf6, 0xba, 0x65, 0xe1, 0xa0, 0x8f, 0x69, 0x5d, 0x3b, 0xf9, - 0x7d, 0x0b, 0x93, 0x26, 0x56, 0x64, 0x69, 0x22, 0x80, 0x4c, 0x70, 0xef, 0x44, 0x32, 0x99, 0x0b, - 0xa3, 0xa7, 0x87, 0xcb, 0xc2, 0xe9, 0x09, 0x1d, 0xa1, 0x55, 0x86, 0x3c, 0xc5, 0x9d, 0x23, 0x26, - 0x99, 0x16, 0x77, 0x7b, 0xdf, 0x76, 0xd7, 0x2f, 0xb1, 0x31, 0x81, 
0x42, 0x79, 0x0f, 0x14, 0x8b, - 0xb2, 0x70, 0x46, 0x3e, 0x93, 0xec, 0xa3, 0x34, 0x0e, 0x25, 0xc4, 0x99, 0x5c, 0x50, 0xdd, 0x83, - 0x7c, 0x86, 0x87, 0xc7, 0x9c, 0xa7, 0xfc, 0xdb, 0x45, 0x06, 0xfa, 0x66, 0x86, 0xde, 0xbb, 0x65, - 0xe1, 0xdc, 0x87, 0x3a, 0xd8, 0xa8, 0xb8, 0x41, 0x92, 0x0f, 0x71, 0x57, 0x3b, 0xfa, 0xe6, 0x86, - 0xde, 0xfd, 0xb2, 0x70, 0xee, 0xea, 0x92, 0x06, 0xdc, 0x20, 0xc8, 0x57, 0x37, 0x7a, 0x75, 0xb5, - 0x5e, 0x1f, 0xdc, 0xaa, 0x97, 0xd1, 0xe0, 0x16, 0xc1, 0x7e, 0x42, 0x78, 0xb4, 0x4e, 0x8d, 0xb8, - 0x18, 0x53, 0x10, 0x79, 0x24, 0xf5, 0xf4, 0x46, 0xb0, 0x51, 0x59, 0x38, 0x98, 0xaf, 0xa2, 0xb4, - 0x81, 0x20, 0x47, 0xb8, 0x67, 0x3c, 0x6b, 0x4b, 0x4f, 0xf2, 0xde, 0xa6, 0x74, 0x27, 0x2c, 0xce, - 0x22, 0x38, 0x91, 0x1c, 0x58, 0xec, 0x8d, 0x2a, 0xe1, 0x7a, 0xa6, 0x1b, 0xad, 0x6a, 0x27, 0x17, - 0x08, 0xdf, 0x69, 0x02, 0xc9, 0x4b, 0xdc, 0x8b, 0xd8, 0x0c, 0x22, 0x75, 0x67, 0x6d, 0xfd, 0x60, - 0x57, 0x5f, 0xdf, 0x33, 0x08, 0xd8, 0x7c, 0xf1, 0x4c, 0x65, 0x9f, 0xb3, 0x90, 0x7b, 0x87, 0xaa, - 0xe7, 0x9f, 0x85, 0xf3, 0x49, 0x10, 0xca, 0xb3, 0x7c, 0xe6, 0xce, 0xd3, 0x78, 0x1a, 0x70, 0x76, - 0xca, 0x12, 0x36, 0x8d, 0xd2, 0xf3, 0x70, 0xda, 0xfc, 0x88, 0x5d, 0x5d, 0x77, 0xe0, 0xb3, 0x4c, - 0x02, 0x57, 0x83, 0xc4, 0x20, 0x79, 0x38, 0xa7, 0xd5, 0x69, 0xe4, 0x4b, 0xdc, 0x17, 0x7a, 0x0e, - 0x51, 0xf1, 0x79, 0xb0, 0x79, 0xb0, 0x19, 0xf3, 0x86, 0xc9, 0x4b, 0x16, 0xe5, 0x20, 0x68, 0x5d, - 0x36, 0x49, 0xf0, 0x48, 0xbd, 0x79, 0xf0, 0x57, 0xef, 0x6f, 0x17, 0xb7, 0xcf, 0x61, 0x51, 0x69, - 0xd9, 0x2f, 0x0b, 0x47, 0xb9, 0x54, 0xfd, 0x90, 0x03, 0xdc, 0x87, 0x1f, 0x24, 0x24, 0xf2, 0xe6, - 0xb8, 0x0d, 0xf9, 0x8e, 0x75, 0xda, 0xbb, 0x5b, 0x1d, 0x57, 0xc3, 0x69, 0x6d, 0x4c, 0x7e, 0x43, - 0xb8, 0x67, 0x40, 0xc4, 0xa9, 0x57, 0x84, 0x3a, 0xaa, 0xed, 0x0d, 0xcb, 0xc2, 0x31, 0x81, 0x7a, - 0x5b, 0xec, 0x9a, 0x6d, 0xa1, 0x37, 0x88, 0x99, 0x04, 0x12, 0xdf, 0xac, 0x8d, 0x31, 0x1e, 0x48, - 0xce, 0xe6, 0xf0, 0x7d, 0xe8, 0x57, 0x0f, 0xb0, 0x7e, 0x2c, 0x3a, 0xfc, 0xc4, 0x27, 0x5f, 0xe0, - 0x01, 0xaf, 0x28, 0x55, 0x5b, 0x64, 0xe7, 0x8d, 0x2d, 0x72, 0x90, 0x2c, 0xbc, 0x3b, 0x65, 0xe1, - 0xac, 0x90, 0x74, 0x65, 0x3d, 0xed, 0x0c, 0xda, 0xf7, 0x3a, 0x9e, 0xb8, 0xbc, 0xb2, 0x5b, 0xaf, - 0xae, 0xec, 0xd6, 0xeb, 0x2b, 0x1b, 0xfd, 0xb8, 0xb4, 0xd1, 0xaf, 0x4b, 0x1b, 0x5d, 0x2c, 0x6d, - 0x74, 0xb9, 0xb4, 0xd1, 0xdf, 0x4b, 0x1b, 0xfd, 0xb3, 0xb4, 0x5b, 0xaf, 0x97, 0x36, 0xfa, 0xf9, - 0xda, 0x6e, 0x5d, 0x5e, 0xdb, 0xad, 0x57, 0xd7, 0x76, 0xeb, 0xbb, 0xc7, 0xff, 0x75, 0xb7, 0xff, - 0xbb, 0x83, 0x67, 0x3d, 0x3d, 0xe0, 0xa7, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x7e, 0x59, - 0x12, 0x69, 0x06, 0x00, 0x00, + // 846 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x5f, 0xc7, 0xfb, 0x77, 0x5a, 0x6d, 0x61, 0x1a, 0x15, 0x27, 0x45, 0xf6, 0x6a, 0x05, 0x52, + 0x90, 0xc0, 0x2b, 0x8a, 0xe8, 0x01, 0xa9, 0x88, 0x38, 0x09, 0xa2, 0x55, 0x25, 0x2a, 0xa7, 0x27, + 0x2e, 0x68, 0x76, 0xfd, 0xe2, 0x58, 0xf1, 0xbf, 0xce, 0x8c, 0x2b, 0xf6, 0xc6, 0x89, 0x73, 0x4f, + 0x88, 0x8f, 0xc0, 0x01, 0xf1, 0x39, 0x72, 0xcc, 0xb1, 0xe2, 0x60, 0xc8, 0xe6, 0x82, 0x7c, 0xea, + 0x47, 0x40, 0xf3, 0xc7, 0xbb, 0xde, 0x8d, 0x02, 0xf4, 0xb2, 0xfb, 0x66, 0xde, 0xef, 0xbd, 0xf7, + 0x7b, 0xbf, 0x79, 0x7e, 0xe8, 0x61, 0x7e, 0x16, 0x4e, 0x5e, 0x14, 0x40, 0x23, 0xa0, 0xf2, 0x7f, + 0x4e, 0x49, 0x1a, 0x42, 0xc3, 0x9c, 0x12, 0xd6, 0x3c, 0xba, 0x39, 0xcd, 0x78, 0x86, 0x87, 0xeb, + 0x80, 0xdd, 0xed, 0x30, 0x0b, 0x33, 0xe9, 0x9a, 0x08, 0x4b, 0xa1, 0x76, 0x77, 0xc2, 0x2c, 0x0b, + 0x63, 0x98, 
0xc8, 0xd3, 0xb4, 0x38, 0x99, 0x90, 0x74, 0xae, 0x5d, 0xf6, 0xa6, 0x2b, 0x28, 0x28, + 0xe1, 0x51, 0x96, 0x6a, 0xbf, 0xb3, 0xe9, 0xe7, 0x51, 0x02, 0x8c, 0x93, 0x24, 0xd7, 0x80, 0xfb, + 0x82, 0x79, 0x9c, 0x85, 0xaa, 0x68, 0x6d, 0x68, 0xe7, 0xc1, 0xff, 0x6b, 0x2b, 0x80, 0x93, 0x28, + 0x8d, 0x44, 0x55, 0xd6, 0xb4, 0x55, 0x92, 0xf1, 0xcf, 0x26, 0x7a, 0xf7, 0x19, 0xcd, 0x12, 0xe0, + 0xa7, 0x50, 0x30, 0x1f, 0x5e, 0x14, 0xc0, 0x38, 0xc6, 0xa8, 0x9d, 0x13, 0x7e, 0x6a, 0x19, 0x23, + 0x63, 0x6f, 0xe0, 0x4b, 0x1b, 0x7f, 0x81, 0x3a, 0x8c, 0x13, 0xca, 0xad, 0xad, 0x91, 0xb1, 0x77, + 0xeb, 0xc1, 0xae, 0xab, 0xc8, 0xbb, 0x35, 0x79, 0xf7, 0x79, 0x4d, 0xde, 0xeb, 0x9f, 0x97, 0x4e, + 0xeb, 0xd5, 0x9f, 0x8e, 0xe1, 0xab, 0x10, 0xfc, 0x10, 0x99, 0x90, 0x06, 0x96, 0xf9, 0x16, 0x91, + 0x22, 0x40, 0xf0, 0x60, 0x1c, 0x72, 0xab, 0x3d, 0x32, 0xf6, 0x4c, 0x5f, 0xda, 0xf8, 0x11, 0xea, + 0x09, 0x99, 0xb2, 0x82, 0x5b, 0x1d, 0x99, 0x6f, 0xe7, 0x5a, 0xbe, 0x43, 0x2d, 0xb3, 0x4a, 0xf7, + 0x8b, 0x48, 0x57, 0xc7, 0xe0, 0x6d, 0xd4, 0x91, 0x02, 0x59, 0x5d, 0xd9, 0x9b, 0x3a, 0xe0, 0xc7, + 0x68, 0x38, 0x23, 0xb3, 0xd3, 0x28, 0x0d, 0xbf, 0xcd, 0xa5, 0x3c, 0x56, 0x4f, 0xe6, 0xbe, 0xef, + 0x36, 0x25, 0x3b, 0x58, 0x83, 0x78, 0x6d, 0x91, 0xdd, 0xdf, 0x08, 0xc4, 0x47, 0xa8, 0xf7, 0x0d, + 0x90, 0x00, 0x28, 0xb3, 0xfa, 0x23, 0x73, 0xef, 0xd6, 0x83, 0x0f, 0xd6, 0x72, 0x5c, 0x13, 0x5b, + 0x81, 0xbd, 0x4e, 0x55, 0x3a, 0xc6, 0x27, 0x7e, 0x1d, 0x3b, 0xfe, 0x7d, 0x0b, 0xe1, 0x26, 0x96, + 0xe5, 0x59, 0xca, 0x00, 0x8f, 0x51, 0xf7, 0x98, 0x13, 0x5e, 0x30, 0xf5, 0x36, 0x1e, 0xaa, 0x4a, + 0xa7, 0xcb, 0xe4, 0x8d, 0xaf, 0x3d, 0xf8, 0x09, 0x6a, 0x1f, 0x12, 0x4e, 0xf4, 0x43, 0xd9, 0xee, + 0xfa, 0x40, 0x34, 0x18, 0x08, 0x94, 0x77, 0x4f, 0x74, 0x51, 0x95, 0xce, 0x30, 0x20, 0x9c, 0x7c, + 0x9c, 0x25, 0x11, 0x87, 0x24, 0xe7, 0x73, 0x5f, 0xe6, 0xc0, 0x9f, 0xa3, 0xc1, 0x11, 0xa5, 0x19, + 0x7d, 0x3e, 0xcf, 0x41, 0xbe, 0xdf, 0xc0, 0x7b, 0xaf, 0x2a, 0x9d, 0xbb, 0x50, 0x5f, 0x36, 0x22, + 0x56, 0x48, 0xfc, 0x11, 0xea, 0xc8, 0x83, 0x7c, 0xb9, 0x81, 0x77, 0xb7, 0x2a, 0x9d, 0x3b, 0x32, + 0xa4, 0x01, 0x57, 0x08, 0xfc, 0xf5, 0x4a, 0xaf, 0x8e, 0xd4, 0xeb, 0xc3, 0x1b, 0xf5, 0x52, 0x1a, + 0xdc, 0x20, 0xd8, 0x4f, 0x06, 0x1a, 0xae, 0xb7, 0x86, 0x5d, 0x84, 0x7c, 0x60, 0x45, 0xcc, 0x25, + 0x7b, 0x25, 0xd8, 0xb0, 0x2a, 0x1d, 0x44, 0x97, 0xb7, 0x7e, 0x03, 0x81, 0x0f, 0x51, 0x57, 0x9d, + 0xac, 0x2d, 0xc9, 0xe4, 0xfd, 0x4d, 0xe9, 0x8e, 0x49, 0x92, 0xc7, 0x70, 0xcc, 0x29, 0x90, 0xc4, + 0x1b, 0x6a, 0xe1, 0xba, 0x2a, 0x9b, 0xaf, 0x63, 0xc7, 0xe7, 0x06, 0xba, 0xdd, 0x04, 0xe2, 0x97, + 0xa8, 0x1b, 0x93, 0x29, 0xc4, 0xe2, 0xcd, 0x4c, 0x39, 0xb0, 0xcb, 0x2f, 0xf9, 0x29, 0x84, 0x64, + 0x36, 0x7f, 0x2a, 0xbc, 0xcf, 0x48, 0x44, 0xbd, 0x03, 0x91, 0xf3, 0x8f, 0xd2, 0xf9, 0x34, 0x8c, + 0xf8, 0x69, 0x31, 0x75, 0x67, 0x59, 0x32, 0x09, 0x29, 0x39, 0x21, 0x29, 0x99, 0xc4, 0xd9, 0x59, + 0x34, 0x69, 0x2e, 0x04, 0x57, 0xc6, 0xed, 0x07, 0x24, 0xe7, 0x40, 0x05, 0x91, 0x04, 0x38, 0x8d, + 0x66, 0xbe, 0xae, 0x86, 0xbf, 0x42, 0x3d, 0x26, 0x79, 0x30, 0xdd, 0xcf, 0xbd, 0xcd, 0xc2, 0x8a, + 0xe6, 0xaa, 0x93, 0x97, 0x24, 0x2e, 0x80, 0xf9, 0x75, 0xd8, 0x38, 0x45, 0x43, 0x31, 0xf3, 0x10, + 0x2c, 0xe7, 0x6f, 0x07, 0x99, 0x67, 0x30, 0xd7, 0x5a, 0xf6, 0xaa, 0xd2, 0x11, 0x47, 0x5f, 0xfc, + 0xe0, 0x7d, 0xd4, 0x83, 0x1f, 0x38, 0xa4, 0x7c, 0x55, 0x6e, 0x43, 0xbe, 0x23, 0xe9, 0xf6, 0xee, + 0xe8, 0x72, 0x35, 0xdc, 0xaf, 0x8d, 0xf1, 0x6f, 0x06, 0xea, 0x2a, 0x10, 0x76, 0xea, 0x75, 0x23, + 0x4a, 0x99, 0xde, 0xa0, 0x2a, 0x1d, 0x75, 0x51, 0xef, 0x94, 0x1d, 0xb5, 0x53, 0xb6, 0xa4, 0x5b, + 0x32, 0x81, 0x34, 0x50, 0x6b, 0x63, 
0x84, 0xfa, 0x9c, 0x92, 0x19, 0x7c, 0x1f, 0x05, 0x7a, 0x00, + 0xeb, 0x61, 0x91, 0xd7, 0x8f, 0x03, 0xfc, 0x25, 0xea, 0x53, 0xdd, 0x92, 0xde, 0x22, 0xdb, 0xd7, + 0xb6, 0xc8, 0x7e, 0x3a, 0xf7, 0x6e, 0x57, 0xa5, 0xb3, 0x44, 0xfa, 0x4b, 0xeb, 0x49, 0xbb, 0x6f, + 0xbe, 0xd3, 0xf6, 0xd8, 0xc5, 0xa5, 0xdd, 0x7a, 0x7d, 0x69, 0xb7, 0xde, 0x5c, 0xda, 0xc6, 0x8f, + 0x0b, 0xdb, 0xf8, 0x75, 0x61, 0x1b, 0xe7, 0x0b, 0xdb, 0xb8, 0x58, 0xd8, 0xc6, 0x5f, 0x0b, 0xdb, + 0xf8, 0x7b, 0x61, 0xb7, 0xde, 0x2c, 0x6c, 0xe3, 0xd5, 0x95, 0xdd, 0xba, 0xb8, 0xb2, 0x5b, 0xaf, + 0xaf, 0xec, 0xd6, 0x77, 0x8f, 0xfe, 0xed, 0x6d, 0xff, 0x73, 0x9f, 0x4f, 0xbb, 0x92, 0xe0, 0x67, + 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x63, 0x5c, 0x0b, 0x88, 0xd6, 0x06, 0x00, 0x00, } func (this *PrometheusRequest) Equal(that interface{}) bool { @@ -512,10 +513,10 @@ func (this *PrometheusRequest) Equal(that interface{}) bool { if this.Path != that1.Path { return false } - if this.Start != that1.Start { + if !this.Start.Equal(that1.Start) { return false } - if this.End != that1.End { + if !this.End.Equal(that1.End) { return false } if this.Step != that1.Step { @@ -888,16 +889,22 @@ func (m *PrometheusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x20 } - if m.End != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x18 + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err3 != nil { + return 0, err3 } - if m.Start != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x10 + i -= n3 + i = encodeVarintQueryrange(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x1a + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err4 != nil { + return 0, err4 } + i -= n4 + i = encodeVarintQueryrange(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x12 if len(m.Path) > 0 { i -= len(m.Path) copy(dAtA[i:], m.Path) @@ -1188,12 +1195,10 @@ func (m *PrometheusRequest) Size() (n int) { if l > 0 { n += 1 + l + sovQueryrange(uint64(l)) } - if m.Start != 0 { - n += 1 + sovQueryrange(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovQueryrange(uint64(m.End)) - } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovQueryrange(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovQueryrange(uint64(l)) if m.Step != 0 { n += 1 + sovQueryrange(uint64(m.Step)) } @@ -1342,8 +1347,8 @@ func (this *PrometheusRequest) String() string { repeatedStringForHeaders += "}" s := strings.Join([]string{`&PrometheusRequest{`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Start:` + fmt.Sprintf("%v", this.Start) + `,`, - `End:` + fmt.Sprintf("%v", this.End) + `,`, + `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Step:` + fmt.Sprintf("%v", this.Step) + `,`, `Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, @@ -1503,10 +1508,10 @@ func (m *PrometheusRequest) Unmarshal(dAtA []byte) error { m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { + if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) } - 
m.Start = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQueryrange @@ -1516,16 +1521,30 @@ func (m *PrometheusRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Start |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: - if wireType != 0 { + if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) } - m.End = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQueryrange @@ -1535,11 +1554,25 @@ func (m *PrometheusRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.End |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.proto b/pkg/querier/queryrange/queryrangebase/queryrange.proto index 8ee2895d66356..ad66551d2bb11 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.proto +++ b/pkg/querier/queryrange/queryrangebase/queryrange.proto @@ -5,6 +5,7 @@ package queryrangebase; import "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; import "pkg/logproto/logproto.proto"; import "pkg/querier/queryrange/queryrangebase/definitions/definitions.proto"; @@ -14,8 +15,14 @@ option (gogoproto.unmarshaler_all) = true; message PrometheusRequest { string path = 1; - int64 start = 2; - int64 end = 3; + google.protobuf.Timestamp start = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; + google.protobuf.Timestamp end = 3 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; int64 step = 4; google.protobuf.Duration timeout = 5 [ (gogoproto.stdduration) = true, diff --git a/pkg/querier/queryrange/queryrangebase/results_cache.go b/pkg/querier/queryrange/queryrangebase/results_cache.go index c81193d88e28b..05d6a26f672ff 100644 --- a/pkg/querier/queryrange/queryrangebase/results_cache.go +++ b/pkg/querier/queryrange/queryrangebase/results_cache.go @@ -21,7 +21,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/uber/jaeger-client-go" @@ -145,7 +144,7 @@ type constSplitter time.Duration // GenerateCacheKey generates a cache key based on the userID, Request and interval. 
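The `GenerateCacheKey` change that follows buckets a request by which fixed interval its start time falls into, so two requests in the same window share a key. A condensed sketch of the arithmetic, with `UnixMilli()` now supplying the milliseconds (`cacheKey` is an illustrative stand-in, not the patched method):

```go
package main

import (
	"fmt"
	"time"
)

// cacheKey buckets start into fixed windows of size interval, mirroring
// constSplitter: requests in the same window produce the same key.
func cacheKey(userID, query string, step int64, start time.Time, interval time.Duration) string {
	currentInterval := start.UnixMilli() / interval.Milliseconds()
	return fmt.Sprintf("%s:%s:%d:%d", userID, query, step, currentInterval)
}

func main() {
	start := time.UnixMilli(91 * time.Minute.Milliseconds()) // 91m after the epoch
	// 91m falls in the fourth 30m window, i.e. bucket 3, matching the
	// "91m" case in the splitter tests further down.
	fmt.Println(cacheKey("fake", "foo{}", 10, start, 30*time.Minute)) // fake:foo{}:10:3
}
```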
func (t constSplitter) GenerateCacheKey(_ context.Context, userID string, r Request) string { - currentInterval := r.GetStart() / int64(time.Duration(t)/time.Millisecond) + currentInterval := r.GetStart().UnixMilli() / int64(time.Duration(t)/time.Millisecond) return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval) } @@ -237,7 +236,7 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { sp.LogKV( "query", r.GetQuery(), "step", time.UnixMilli(r.GetStep()), - "start", time.UnixMilli(r.GetStart()), + "start", r.GetStart(), "end", r.GetEnd(), "key", key, ) @@ -245,7 +244,7 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) } maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture) maxCacheTime := int64(model.Now().Add(-maxCacheFreshness)) - if r.GetStart() > maxCacheTime { + if r.GetStart().UnixMilli() > maxCacheTime { return s.next.Do(ctx, r) } @@ -338,9 +337,9 @@ func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool { } // This resolves the start() and end() used with the @ modifier. - expr = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd())) + expr = promql.PreprocessExpr(expr, r.GetStart(), r.GetEnd()) - end := r.GetEnd() + end := r.GetEnd().UnixMilli() atModCachable := true parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error { switch e := n.(type) { @@ -533,8 +532,8 @@ func toExtent(ctx context.Context, req Request, res Response) (Extent, error) { return Extent{}, err } return Extent{ - Start: req.GetStart(), - End: req.GetEnd(), + Start: req.GetStart().UnixMilli(), + End: req.GetEnd().UnixMilli(), Response: anyResp, TraceId: jaegerTraceID(ctx), }, nil @@ -545,11 +544,12 @@ func toExtent(ctx context.Context, req Request, res Response) (Extent, error) { func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) { var requests []Request var cachedResponses []Response - start := req.GetStart() + start := req.GetStart().UnixMilli() + end := req.GetEnd().UnixMilli() for _, extent := range extents { // If there is no overlap, ignore this extent. - if extent.GetEnd() < start || extent.Start > req.GetEnd() { + if extent.GetEnd() < start || extent.Start > end { continue } @@ -559,13 +559,13 @@ func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Res // However if the step is large enough, the split_query_by_interval middleware would generate a query with same start and end. // For example, if the step size is more than 12h and the interval is 24h. // This means the extent's start and end time would be same, even if the timerange covers several hours. - if (req.GetStart() != req.GetEnd()) && (req.GetEnd()-req.GetStart() > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) { + if (req.GetStart() != req.GetEnd()) && ((end - start) > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) { continue } // If there is a bit missing at the front, make a request for that. 
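The loop below walks the cached extents and requests whatever the cache cannot serve. A condensed, self-contained sketch of that partitioning over millisecond bounds, matching the walk the patched code performs after converting its `time.Time` endpoints (`partition` here is illustrative):

```go
package main

import "fmt"

// Extent is a cached [Start, End] span in milliseconds.
type Extent struct{ Start, End int64 }

// partition returns the sub-ranges of [start, end] not covered by the
// sorted, non-overlapping cached extents.
func partition(start, end int64, extents []Extent) (missing [][2]int64) {
	for _, e := range extents {
		if e.End < start || e.Start > end {
			continue // no overlap with the request window
		}
		if start < e.Start {
			missing = append(missing, [2]int64{start, e.Start}) // gap at the front
		}
		start = e.End
	}
	if start < end {
		missing = append(missing, [2]int64{start, end}) // gap at the end
	}
	return missing
}

func main() {
	// Mirrors the "multiple partial hits" test below: [100,200] against
	// cached [50,120] and [160,250] leaves only [120,160] to fetch.
	fmt.Println(partition(100, 200, []Extent{{50, 120}, {160, 250}})) // [[120 160]]
}
```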
if start < extent.Start { - r := req.WithStartEnd(start, extent.Start) + r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(extent.Start)) requests = append(requests, r) } res, err := extent.toResponse() @@ -573,13 +573,13 @@ func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Res return nil, nil, err } // extract the overlap from the cached extent. - cachedResponses = append(cachedResponses, s.extractor.Extract(start, req.GetEnd(), res, extent.GetStart(), extent.GetEnd())) + cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd())) start = extent.End } // Lastly, make a request for any data missing at the end. - if start < req.GetEnd() { - r := req.WithStartEnd(start, req.GetEnd()) + if start < req.GetEnd().UnixMilli() { + r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(end)) requests = append(requests, r) } diff --git a/pkg/querier/queryrange/queryrangebase/results_cache_test.go b/pkg/querier/queryrange/queryrangebase/results_cache_test.go index 93302bf42612d..bfe3ecea5f0b4 100644 --- a/pkg/querier/queryrange/queryrangebase/results_cache_test.go +++ b/pkg/querier/queryrange/queryrangebase/results_cache_test.go @@ -28,8 +28,8 @@ const ( var ( parsedRequest = &PrometheusRequest{ Path: "/api/v1/query_range", - Start: 1536673680 * 1e3, - End: 1536716898 * 1e3, + Start: time.UnixMilli(1536673680 * 1e3), + End: time.UnixMilli(1536716898 * 1e3), Step: 120 * 1e3, Query: "sum(container_memory_rss) by (namespace)", } @@ -41,8 +41,8 @@ var ( } noCacheRequest = &PrometheusRequest{ Path: "/api/v1/query_range", - Start: 1536673680 * 1e3, - End: 1536716898 * 1e3, + Start: time.UnixMilli(1536673680 * 1e3), + End: time.UnixMilli(1536716898 * 1e3), Step: 120 * 1e3, Query: "sum(container_memory_rss) by (namespace)", CachingOptions: CachingOptions{Disabled: true}, @@ -278,111 +278,111 @@ func TestShouldCache(t *testing.T) { // @ modifier on vector selectors. 
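The cases below all hinge on how `@` timestamps are resolved before the cachability check. The results-cache code shown above now calls `promql.PreprocessExpr` with the request's `time.Time` bounds, so `start()`/`end()` become concrete timestamps that can be compared against `maxCacheTime`. A small sketch using the same Prometheus helper:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr("metric @ end()")
	if err != nil {
		panic(err)
	}
	start, end := time.UnixMilli(100000), time.UnixMilli(200000)
	// PreprocessExpr resolves start()/end() against the request window,
	// which is what lets the cache compare the @ time to maxCacheTime.
	resolved := promql.PreprocessExpr(expr, start, end)
	fmt.Println(resolved.String()) // the selector now carries a concrete @ timestamp
}
```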
{ name: "@ modifier on vector selector, before end, before maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 123", End: 125000}, + request: &PrometheusRequest{Query: "metric @ 123", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: true, }, { name: "@ modifier on vector selector, after end, before maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 127", End: 125000}, + request: &PrometheusRequest{Query: "metric @ 127", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on vector selector, before end, after maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 151", End: 200000}, + request: &PrometheusRequest{Query: "metric @ 151", End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on vector selector, after end, after maxCacheTime", - request: &PrometheusRequest{Query: "metric @ 151", End: 125000}, + request: &PrometheusRequest{Query: "metric @ 151", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on vector selector with start() before maxCacheTime", - request: &PrometheusRequest{Query: "metric @ start()", Start: 100000, End: 200000}, + request: &PrometheusRequest{Query: "metric @ start()", Start: time.UnixMilli(100000), End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: true, }, { name: "@ modifier on vector selector with end() after maxCacheTime", - request: &PrometheusRequest{Query: "metric @ end()", Start: 100000, End: 200000}, + request: &PrometheusRequest{Query: "metric @ end()", Start: time.UnixMilli(100000), End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: false, }, // @ modifier on matrix selectors. 
{ name: "@ modifier on matrix selector, before end, before maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 123)", End: 125000}, + request: &PrometheusRequest{Query: "rate(metric[5m] @ 123)", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: true, }, { name: "@ modifier on matrix selector, after end, before maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 127)", End: 125000}, + request: &PrometheusRequest{Query: "rate(metric[5m] @ 127)", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on matrix selector, before end, after maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 200000}, + request: &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on matrix selector, after end, after maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: 125000}, + request: &PrometheusRequest{Query: "rate(metric[5m] @ 151)", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on matrix selector with start() before maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ start())", Start: 100000, End: 200000}, + request: &PrometheusRequest{Query: "rate(metric[5m] @ start())", Start: time.UnixMilli(100000), End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: true, }, { name: "@ modifier on matrix selector with end() after maxCacheTime", - request: &PrometheusRequest{Query: "rate(metric[5m] @ end())", Start: 100000, End: 200000}, + request: &PrometheusRequest{Query: "rate(metric[5m] @ end())", Start: time.UnixMilli(100000), End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: false, }, // @ modifier on subqueries. 
{ name: "@ modifier on subqueries, before end, before maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 123)", End: 125000}, + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 123)", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: true, }, { name: "@ modifier on subqueries, after end, before maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 127)", End: 125000}, + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 127)", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on subqueries, before end, after maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 200000}, + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on subqueries, after end, after maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: 125000}, + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ 151)", End: time.UnixMilli(125000)}, input: Response(&PrometheusResponse{}), expected: false, }, { name: "@ modifier on subqueries with start() before maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ start())", Start: 100000, End: 200000}, + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ start())", Start: time.UnixMilli(100000), End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: true, }, { name: "@ modifier on subqueries with end() after maxCacheTime", - request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ end())", Start: 100000, End: 200000}, + request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] @ end())", Start: time.UnixMilli(100000), End: time.UnixMilli(200000)}, input: Response(&PrometheusResponse{}), expected: false, }, @@ -408,8 +408,8 @@ func TestPartition(t *testing.T) { { name: "Test a complete hit.", input: &PrometheusRequest{ - Start: 0, - End: 100, + Start: time.UnixMilli(0), + End: time.UnixMilli(100), }, prevCachedResponse: []Extent{ mkExtent(0, 100), @@ -422,32 +422,32 @@ func TestPartition(t *testing.T) { { name: "Test with a complete miss.", input: &PrometheusRequest{ - Start: 0, - End: 100, + Start: time.UnixMilli(0), + End: time.UnixMilli(100), }, prevCachedResponse: []Extent{ mkExtent(110, 210), }, expectedRequests: []Request{ &PrometheusRequest{ - Start: 0, - End: 100, + Start: time.UnixMilli(0), + End: time.UnixMilli(100), }, }, }, { name: "Test a partial hit.", input: &PrometheusRequest{ - Start: 0, - End: 100, + Start: time.UnixMilli(0), + End: time.UnixMilli(100), }, prevCachedResponse: []Extent{ mkExtent(50, 100), }, expectedRequests: []Request{ &PrometheusRequest{ - Start: 0, - End: 50, + Start: time.UnixMilli(0), + End: time.UnixMilli(50), }, }, expectedCachedResponse: []Response{ @@ -457,8 +457,8 @@ func TestPartition(t *testing.T) { { name: "Test multiple partial hits.", input: &PrometheusRequest{ - Start: 100, - End: 200, + Start: time.UnixMilli(100), + End: time.UnixMilli(200), }, prevCachedResponse: []Extent{ mkExtent(50, 120), @@ -466,8 +466,8 @@ func TestPartition(t *testing.T) { }, expectedRequests: []Request{ &PrometheusRequest{ - Start: 120, - End: 
160, + Start: time.UnixMilli(120), + End: time.UnixMilli(160), }, }, expectedCachedResponse: []Response{ @@ -478,8 +478,8 @@ func TestPartition(t *testing.T) { { name: "Partial hits with tiny gap.", input: &PrometheusRequest{ - Start: 100, - End: 160, + Start: time.UnixMilli(100), + End: time.UnixMilli(160), }, prevCachedResponse: []Extent{ mkExtent(50, 120), @@ -487,8 +487,8 @@ func TestPartition(t *testing.T) { }, expectedRequests: []Request{ &PrometheusRequest{ - Start: 120, - End: 160, + Start: time.UnixMilli(120), + End: time.UnixMilli(160), }, }, expectedCachedResponse: []Response{ @@ -498,16 +498,16 @@ func TestPartition(t *testing.T) { { name: "Extent is outside the range and the request has a single step (same start and end).", input: &PrometheusRequest{ - Start: 100, - End: 100, + Start: time.UnixMilli(100), + End: time.UnixMilli(100), }, prevCachedResponse: []Extent{ mkExtent(50, 90), }, expectedRequests: []Request{ &PrometheusRequest{ - Start: 100, - End: 100, + Start: time.UnixMilli(100), + End: time.UnixMilli(100), }, }, }, @@ -515,8 +515,8 @@ func TestPartition(t *testing.T) { name: "Test when hit has a large step and only a single sample extent.", // If there is a only a single sample in the split interval, start and end will be the same. input: &PrometheusRequest{ - Start: 100, - End: 100, + Start: time.UnixMilli(100), + End: time.UnixMilli(100), }, prevCachedResponse: []Extent{ mkExtent(100, 100), @@ -549,8 +549,8 @@ func TestHandleHit(t *testing.T) { { name: "Should drop tiny extent that overlaps with non-tiny request only", input: &PrometheusRequest{ - Start: 100, - End: 120, + Start: time.UnixMilli(100), + End: time.UnixMilli(120), Step: 5, }, cachedEntry: []Extent{ @@ -570,8 +570,8 @@ func TestHandleHit(t *testing.T) { { name: "Should replace tiny extents that are cover by bigger request", input: &PrometheusRequest{ - Start: 100, - End: 200, + Start: time.UnixMilli(100), + End: time.UnixMilli(200), Step: 5, }, cachedEntry: []Extent{ @@ -594,8 +594,8 @@ func TestHandleHit(t *testing.T) { { name: "Should not drop tiny extent that completely overlaps with tiny request", input: &PrometheusRequest{ - Start: 100, - End: 105, + Start: time.UnixMilli(100), + End: time.UnixMilli(105), Step: 5, }, cachedEntry: []Extent{ @@ -609,8 +609,8 @@ func TestHandleHit(t *testing.T) { { name: "Should not drop tiny extent that partially center-overlaps with tiny request", input: &PrometheusRequest{ - Start: 106, - End: 108, + Start: time.UnixMilli(106), + End: time.UnixMilli(108), Step: 2, }, cachedEntry: []Extent{ @@ -623,8 +623,8 @@ func TestHandleHit(t *testing.T) { { name: "Should not drop tiny extent that partially left-overlaps with tiny request", input: &PrometheusRequest{ - Start: 100, - End: 106, + Start: time.UnixMilli(100), + End: time.UnixMilli(106), Step: 2, }, cachedEntry: []Extent{ @@ -641,8 +641,8 @@ func TestHandleHit(t *testing.T) { { name: "Should not drop tiny extent that partially right-overlaps with tiny request", input: &PrometheusRequest{ - Start: 100, - End: 106, + Start: time.UnixMilli(100), + End: time.UnixMilli(106), Step: 2, }, cachedEntry: []Extent{ @@ -659,8 +659,8 @@ func TestHandleHit(t *testing.T) { { name: "Should merge fragmented extents if request fills the hole", input: &PrometheusRequest{ - Start: 40, - End: 80, + Start: time.UnixMilli(40), + End: time.UnixMilli(80), Step: 20, }, cachedEntry: []Extent{ @@ -674,8 +674,8 @@ func TestHandleHit(t *testing.T) { { name: "Should left-extend extent if request starts earlier than extent in cache", input: 
&PrometheusRequest{ - Start: 40, - End: 80, + Start: time.UnixMilli(40), + End: time.UnixMilli(80), Step: 20, }, cachedEntry: []Extent{ @@ -688,8 +688,8 @@ func TestHandleHit(t *testing.T) { { name: "Should right-extend extent if request ends later than extent in cache", input: &PrometheusRequest{ - Start: 100, - End: 180, + Start: time.UnixMilli(100), + End: time.UnixMilli(180), Step: 20, }, cachedEntry: []Extent{ @@ -704,8 +704,8 @@ func TestHandleHit(t *testing.T) { input: &PrometheusRequest{ // This request is carefully crated such that cachedEntry is not used to fulfill // the request. - Start: 160, - End: 180, + Start: time.UnixMilli(160), + End: time.UnixMilli(180), Step: 20, }, cachedEntry: []Extent{ @@ -733,7 +733,7 @@ func TestHandleHit(t *testing.T) { merger: PrometheusCodec, parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 }, next: HandlerFunc(func(_ context.Context, req Request) (Response, error) { - return mkAPIResponse(req.GetStart(), req.GetEnd(), req.GetStep()), nil + return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil }), } @@ -741,7 +741,7 @@ func TestHandleHit(t *testing.T) { response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0) require.NoError(t, err) - expectedResponse := mkAPIResponse(tc.input.GetStart(), tc.input.GetEnd(), tc.input.GetStep()) + expectedResponse := mkAPIResponse(tc.input.GetStart().UnixMilli(), tc.input.GetEnd().UnixMilli(), tc.input.GetStep()) require.Equal(t, expectedResponse, response, "response does not match the expectation") require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation") }) @@ -791,7 +791,7 @@ func TestResultsCache(t *testing.T) { require.Equal(t, parsedResponse, resp) // Doing request with new end time should do one more query. 
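The diff line just below is representative of the test churn: shifting a window is now `time.Time` arithmetic instead of adding raw milliseconds. A trivial sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	end := time.UnixMilli(1536716898000)
	extended := end.Add(100 * time.Millisecond) // was: end + 100 on an int64
	fmt.Println(extended.UnixMilli() - end.UnixMilli()) // 100
}
```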
- req := parsedRequest.WithStartEnd(parsedRequest.GetStart(), parsedRequest.GetEnd()+100) + req := parsedRequest.WithStartEnd(parsedRequest.GetStart(), parsedRequest.GetEnd().Add(100*time.Millisecond)) _, err = rc.Do(ctx, req) require.NoError(t, err) require.Equal(t, 2, calls) @@ -820,7 +820,7 @@ func TestResultsCacheRecent(t *testing.T) { ) require.NoError(t, err) - req := parsedRequest.WithStartEnd(int64(model.Now())-(60*1e3), int64(model.Now())) + req := parsedRequest.WithStartEnd(time.Now().Add(-60*1e3*time.Millisecond), time.Now()) calls := 0 rc := rcm.Wrap(HandlerFunc(func(_ context.Context, r Request) (Response, error) { @@ -893,7 +893,7 @@ func TestResultsCacheMaxFreshness(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "1") // create request with start end within the key extents - req := parsedRequest.WithStartEnd(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3)) + req := parsedRequest.WithStartEnd(time.UnixMilli(int64(modelNow)-(50*1e3)), time.UnixMilli(int64(modelNow)-(10*1e3))) // fill cache key := constSplitter(day).GenerateCacheKey(context.Background(), "1", req) @@ -972,14 +972,14 @@ func TestConstSplitter_generateCacheKey(t *testing.T) { interval time.Duration want string }{ - {"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, - {"<30m", &PrometheusRequest{Start: toMs(10 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, - {"30m", &PrometheusRequest{Start: toMs(30 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:1"}, - {"91m", &PrometheusRequest{Start: toMs(91 * time.Minute), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:3"}, - {"0", &PrometheusRequest{Start: 0, Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, - {"<1d", &PrometheusRequest{Start: toMs(22 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, - {"4d", &PrometheusRequest{Start: toMs(4 * 24 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:4"}, - {"3d5h", &PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"}, + {"0", &PrometheusRequest{Start: time.UnixMilli(0), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, + {"<30m", &PrometheusRequest{Start: time.UnixMilli(toMs(10 * time.Minute)), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:0"}, + {"30m", &PrometheusRequest{Start: time.UnixMilli(toMs(30 * time.Minute)), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:1"}, + {"91m", &PrometheusRequest{Start: time.UnixMilli(toMs(91 * time.Minute)), Step: 10, Query: "foo{}"}, 30 * time.Minute, "fake:foo{}:10:3"}, + {"0", &PrometheusRequest{Start: time.UnixMilli(0), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, + {"<1d", &PrometheusRequest{Start: time.UnixMilli(toMs(22 * time.Hour)), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:0"}, + {"4d", &PrometheusRequest{Start: time.UnixMilli(toMs(4 * 24 * time.Hour)), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:4"}, + {"3d5h", &PrometheusRequest{Start: time.UnixMilli(toMs(77 * time.Hour)), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"}, } for _, tt := range tests { t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) { diff --git a/pkg/querier/queryrange/queryrangebase/step_align.go b/pkg/querier/queryrange/queryrangebase/step_align.go index edef2d6f080df..7f61d41837168 100644 --- 
a/pkg/querier/queryrange/queryrangebase/step_align.go +++ b/pkg/querier/queryrange/queryrangebase/step_align.go @@ -2,6 +2,7 @@ package queryrangebase import ( "context" + "time" ) // StepAlignMiddleware aligns the start and end of request to the step to @@ -17,7 +18,7 @@ type stepAlign struct { } func (s stepAlign) Do(ctx context.Context, r Request) (Response, error) { - start := (r.GetStart() / r.GetStep()) * r.GetStep() - end := (r.GetEnd() / r.GetStep()) * r.GetStep() - return s.next.Do(ctx, r.WithStartEnd(start, end)) + start := (r.GetStart().UnixMilli() / r.GetStep()) * r.GetStep() + end := (r.GetEnd().UnixMilli() / r.GetStep()) * r.GetStep() + return s.next.Do(ctx, r.WithStartEnd(time.UnixMilli(start), time.UnixMilli(end))) } diff --git a/pkg/querier/queryrange/queryrangebase/step_align_test.go b/pkg/querier/queryrange/queryrangebase/step_align_test.go index d68a2080d4b25..cf1468e7c27ed 100644 --- a/pkg/querier/queryrange/queryrangebase/step_align_test.go +++ b/pkg/querier/queryrange/queryrangebase/step_align_test.go @@ -4,6 +4,7 @@ import ( "context" "strconv" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -14,26 +15,26 @@ func TestStepAlign(t *testing.T) { }{ { input: &PrometheusRequest{ - Start: 0, - End: 100, + Start: time.UnixMilli(0), + End: time.UnixMilli(100), Step: 10, }, expected: &PrometheusRequest{ - Start: 0, - End: 100, + Start: time.UnixMilli(0), + End: time.UnixMilli(100), Step: 10, }, }, { input: &PrometheusRequest{ - Start: 2, - End: 102, + Start: time.UnixMilli(2), + End: time.UnixMilli(102), Step: 10, }, expected: &PrometheusRequest{ - Start: 0, - End: 100, + Start: time.UnixMilli(0), + End: time.UnixMilli(100), Step: 10, }, }, diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index b2af68b55b783..143174439c159 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -149,7 +149,7 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que return ast.next.Do(ctx, r) } - conf, err := ast.confs.GetConf(int64(model.Time(r.GetStart()).Add(-maxRVDuration).Add(-maxOffset)), int64(model.Time(r.GetEnd()).Add(-maxOffset))) + conf, err := ast.confs.GetConf(int64(model.Time(r.GetStart().UnixMilli()).Add(-maxRVDuration).Add(-maxOffset)), int64(model.Time(r.GetEnd().UnixMilli()).Add(-maxOffset))) // cannot shard with this timerange if err != nil { level.Warn(logger).Log("err", err.Error(), "msg", "skipped AST mapper for request") @@ -173,7 +173,7 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que conf, ast.ng.Opts().MaxLookBackPeriod, ast.logger, - MinWeightedParallelism(ctx, tenants, ast.confs, ast.limits, model.Time(r.GetStart()), model.Time(r.GetEnd())), + MinWeightedParallelism(ctx, tenants, ast.confs, ast.limits, model.Time(r.GetStart().UnixMilli()), model.Time(r.GetEnd().UnixMilli())), ast.maxShards, r, ast.statsHandler, @@ -303,7 +303,7 @@ func (splitter *shardSplitter) Do(ctx context.Context, r queryrangebase.Request) cutoff := splitter.now().Add(-minShardingLookback) // Only attempt to shard queries which are older than the sharding lookback // (the period for which ingesters are also queried) or when the lookback is disabled. 
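The hunk below keeps `util.TimeFromMillis(...UnixMilli())`, a round trip that the new accessor arguably makes unnecessary; since requests carry millisecond precision, `r.GetEnd().Before(cutoff)` should be equivalent. A sketch of that direct form (the `shouldShard` name is illustrative, not from the patch):

```go
package main

import (
	"fmt"
	"time"
)

// shouldShard reports whether a query is old enough to shard: either the
// lookback is disabled, or the query ends before the cutoff.
func shouldShard(end, cutoff time.Time, minShardingLookback time.Duration) bool {
	return minShardingLookback == 0 || end.Before(cutoff)
}

func main() {
	now := time.Now()
	cutoff := now.Add(-time.Hour) // now minus minShardingLookback
	fmt.Println(shouldShard(now.Add(-2*time.Hour), cutoff, time.Hour)) // true
}
```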
- if minShardingLookback == 0 || util.TimeFromMillis(r.GetEnd()).Before(cutoff) { + if minShardingLookback == 0 || util.TimeFromMillis(r.GetEnd().UnixMilli()).Before(cutoff) { return splitter.shardingware.Do(ctx, r) } return splitter.next.Do(ctx, r) @@ -400,7 +400,7 @@ type seriesShardingHandler struct { } func (ss *seriesShardingHandler) Do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { - conf, err := ss.confs.GetConf(r.GetStart(), r.GetEnd()) + conf, err := ss.confs.GetConf(r.GetStart().UnixMilli(), r.GetEnd().UnixMilli()) // cannot shard with this timerange if err != nil { level.Warn(ss.logger).Log("err", err.Error(), "msg", "skipped sharding for request") @@ -433,7 +433,7 @@ func (ss *seriesShardingHandler) Do(ctx context.Context, r queryrangebase.Reques ctx, ss.next, requests, - MinWeightedParallelism(ctx, tenantIDs, ss.confs, ss.limits, model.Time(req.GetStart()), model.Time(req.GetEnd())), + MinWeightedParallelism(ctx, tenantIDs, ss.confs, ss.limits, model.Time(req.GetStart().UnixMilli()), model.Time(req.GetEnd().UnixMilli())), ) if err != nil { return nil, err diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index e3e83f967ac04..79d789b1ac4b6 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -85,10 +85,7 @@ var ( ) func Test_shardSplitter(t *testing.T) { - req := defaultReq().WithStartEnd( - util.TimeToMillis(start), - util.TimeToMillis(end), - ) + req := defaultReq().WithStartEnd(start, end) for _, tc := range []struct { desc string @@ -812,7 +809,7 @@ func Test_ASTMapper_MaxLookBackPeriod(t *testing.T) { statsHandler := queryrangebase.HandlerFunc(func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { // This is the actual check that we're testing. 
- require.Equal(t, testTime.Add(-engineOpts.MaxLookBackPeriod).UnixMilli(), req.GetStart()) + require.Equal(t, testTime.Add(-engineOpts.MaxLookBackPeriod).UnixMilli(), req.GetStart().UnixMilli()) return &IndexStatsResponse{ Response: &logproto.IndexStatsResponse{ diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 9c409d14a5a9f..91c098dd933dc 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -605,8 +605,8 @@ func NewMetricTripperware( tenantIDs, schema.Configs, limits, - model.Time(r.GetStart()), - model.Time(r.GetEnd()), + model.Time(r.GetStart().UnixMilli()), + model.Time(r.GetEnd().UnixMilli()), ) }, retentionEnabled, @@ -775,8 +775,8 @@ func NewVolumeTripperware( tenantIDs, schema.Configs, limits, - model.Time(r.GetStart()), - model.Time(r.GetEnd()), + model.Time(r.GetStart().UnixMilli()), + model.Time(r.GetEnd().UnixMilli()), ) }, retentionEnabled, @@ -876,8 +876,8 @@ func NewIndexStatsTripperware( tenantIDs, schema.Configs, limits, - model.Time(r.GetStart()), - model.Time(r.GetEnd()), + model.Time(r.GetStart().UnixMilli()), + model.Time(r.GetEnd().UnixMilli()), ) }, retentionEnabled, diff --git a/pkg/querier/queryrange/shard_resolver.go b/pkg/querier/queryrange/shard_resolver.go index 5cf2c83ae6bdf..aed0e96e2b47d 100644 --- a/pkg/querier/queryrange/shard_resolver.go +++ b/pkg/querier/queryrange/shard_resolver.go @@ -43,8 +43,8 @@ func shardResolverForConf( logger: logger, handler: handler, limits: limits, - from: model.Time(r.GetStart()), - through: model.Time(r.GetEnd()), + from: model.Time(r.GetStart().UnixMilli()), + through: model.Time(r.GetEnd().UnixMilli()), maxParallelism: maxParallelism, maxShards: maxShards, defaultLookback: defaultLookback, diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index 69bca0a4e6523..97045066f48b5 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -230,7 +230,7 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que maxSeriesCapture := func(id string) int { return h.limits.MaxQuerySeries(ctx, id) } maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture) - maxParallelism := MinWeightedParallelism(ctx, tenantIDs, h.configs, h.limits, model.Time(r.GetStart()), model.Time(r.GetEnd())) + maxParallelism := MinWeightedParallelism(ctx, tenantIDs, h.configs, h.limits, model.Time(r.GetStart().UnixMilli()), model.Time(r.GetEnd().UnixMilli())) resps, err := h.Process(ctx, maxParallelism, limit, input, maxSeries) if err != nil { return nil, err @@ -276,8 +276,8 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran reqs = append(reqs, NewLabelRequest(start, end, r.Query, r.Name, r.Path())) }) case *logproto.IndexStatsRequest: - startTS := model.Time(r.GetStart()).Time() - endTS := model.Time(r.GetEnd()).Time() + startTS := r.GetStart() + endTS := r.GetEnd() util.ForInterval(interval, startTS, endTS, true, func(start, end time.Time) { reqs = append(reqs, &logproto.IndexStatsRequest{ From: model.TimeFromUnix(start.Unix()), @@ -286,8 +286,8 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran }) }) case *logproto.VolumeRequest: - startTS := model.Time(r.GetStart()).Time() - endTS := model.Time(r.GetEnd()).Time() + startTS := r.GetStart() + endTS := r.GetEnd() util.ForInterval(interval, startTS, endTS, true, func(start, end time.Time) { reqs = append(reqs, 
&logproto.VolumeRequest{ From: model.TimeFromUnix(start.Unix()), @@ -363,7 +363,7 @@ func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]quer } end := time.Unix(0, endNs) - lokiReq = lokiReq.WithStartEnd(util.TimeToMillis(start), util.TimeToMillis(end)).(*LokiRequest) + lokiReq = lokiReq.WithStartEnd(start, end).(*LokiRequest) // step is >= configured split interval, let us just split the query interval by step if lokiReq.Step >= interval.Milliseconds() { diff --git a/pkg/querier/queryrange/volume_cache.go b/pkg/querier/queryrange/volume_cache.go index 5fa84840f7fcc..0c54745654004 100644 --- a/pkg/querier/queryrange/volume_cache.go +++ b/pkg/querier/queryrange/volume_cache.go @@ -91,7 +91,7 @@ func shouldCacheVolume(ctx context.Context, req queryrangebase.Request, lim Limi maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture) now := volumeCacheMiddlewareNowTimeFunc() - return maxCacheFreshness == 0 || model.Time(req.GetEnd()).Before(now.Add(-maxCacheFreshness)), nil + return maxCacheFreshness == 0 || model.Time(req.GetEnd().UnixMilli()).Before(now.Add(-maxCacheFreshness)), nil } func NewVolumeCacheMiddleware( diff --git a/pkg/querier/queryrange/volume_cache_test.go b/pkg/querier/queryrange/volume_cache_test.go index f1e3a30d43771..009302783b554 100644 --- a/pkg/querier/queryrange/volume_cache_test.go +++ b/pkg/querier/queryrange/volume_cache_test.go @@ -116,7 +116,7 @@ func TestVolumeCache(t *testing.T) { // The new start time is 15m (i.e. 25%) in the future with regard to the previous request time span. *calls = 0 - req := volReq.WithStartEnd(volReq.GetStart()+(15*time.Minute).Milliseconds(), volReq.GetEnd()+(15*time.Minute).Milliseconds()) + req := volReq.WithStartEnd(volReq.GetStart().Add(15*time.Minute), volReq.GetEnd().Add(15*time.Minute)) vol := float64(0.75) expectedVol := &VolumeResponse{ Response: &logproto.VolumeResponse{ From 7fa2f6ea9ef451a4f78a3db0612d6093266f620f Mon Sep 17 00:00:00 2001 From: Tianfeng Wang Date: Wed, 25 Oct 2023 21:51:47 +0100 Subject: [PATCH 22/33] Add span logFields: wait_goroutine_capacity_time_ms and min_weighted_parallism (#10846) **What this PR does / why we need it**: https://github.com/grafana/loki/issues/5316 --- pkg/querier/queryrange/limits.go | 43 ++++++++++++++++++-- pkg/querier/queryrange/limits_test.go | 56 +++++++++++++++++++++++++++ 2 files changed, 95 insertions(+), 4 deletions(-) diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go index 82bef4bf958a9..b6f5c4d51fb33 100644 --- a/pkg/querier/queryrange/limits.go +++ b/pkg/querier/queryrange/limits.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/dskit/tenant" "github.com/opentracing/opentracing-go" + otlog "github.com/opentracing/opentracing-go/log" "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -452,6 +453,27 @@ func NewLimitedRoundTripper(next queryrangebase.Handler, limits Limits, configs return transport } +type SemaphoreWithTiming struct { + sem *semaphore.Weighted +} + +func NewSemaphoreWithTiming(max int64) *SemaphoreWithTiming { + return &SemaphoreWithTiming{ + sem: semaphore.NewWeighted(max), + } +} + +// acquires the semaphore and records the time it takes. 
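The `Acquire` implementation continues below; as a usage sketch, here is the wait-measuring behaviour in isolation (a self-contained mirror of the patched type, with an arbitrary 50ms hold time):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

// SemaphoreWithTiming mirrors the type added by this patch.
type SemaphoreWithTiming struct{ sem *semaphore.Weighted }

func NewSemaphoreWithTiming(max int64) *SemaphoreWithTiming {
	return &SemaphoreWithTiming{sem: semaphore.NewWeighted(max)}
}

// Acquire acquires the semaphore and reports how long the caller waited.
func (s *SemaphoreWithTiming) Acquire(ctx context.Context, n int64) (time.Duration, error) {
	start := time.Now()
	if err := s.sem.Acquire(ctx, n); err != nil {
		return 0, err
	}
	return time.Since(start), nil
}

func main() {
	ctx := context.Background()
	sem := NewSemaphoreWithTiming(1)

	if _, err := sem.Acquire(ctx, 1); err != nil { // first acquire: no waiting
		panic(err)
	}
	go func() {
		time.Sleep(50 * time.Millisecond)
		sem.sem.Release(1)
	}()

	elapsed, err := sem.Acquire(ctx, 1) // blocks ~50ms and reports it
	if err != nil {
		panic(err)
	}
	fmt.Println("waited:", elapsed)
}
```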
+func (s *SemaphoreWithTiming) Acquire(ctx context.Context, n int64) (time.Duration, error) { + start := time.Now() + + if err := s.sem.Acquire(ctx, n); err != nil { + return 0, err + } + + return time.Since(start), nil +} + func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Request) (queryrangebase.Response, error) { var ( ctx, cancel = context.WithCancel(c) @@ -460,9 +482,12 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque cancel() }() - if span := opentracing.SpanFromContext(ctx); span != nil { + span := opentracing.SpanFromContext(ctx) + + if span != nil { request.LogToSpan(span) } + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) @@ -481,7 +506,7 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque return nil, httpgrpc.Errorf(http.StatusTooManyRequests, ErrMaxQueryParalellism.Error()) } - sem := semaphore.NewWeighted(int64(parallelism)) + semWithTiming := NewSemaphoreWithTiming(int64(parallelism)) return rt.middleware.Wrap( queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { @@ -492,10 +517,20 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque // the thousands. // Note: It is the responsibility of the caller to run // the handler in parallel. - if err := sem.Acquire(ctx, int64(1)); err != nil { + elapsed, err := semWithTiming.Acquire(ctx, int64(1)) + + if err != nil { return nil, fmt.Errorf("could not acquire work: %w", err) } - defer sem.Release(int64(1)) + + if span != nil { + span.LogFields( + otlog.String("wait_time", elapsed.String()), + otlog.Int64("max_parallelism", int64(parallelism)), + ) + } + + defer semWithTiming.sem.Release(int64(1)) return rt.next.Do(ctx, r) })).Do(ctx, request) diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index 4ab81ec4acd3b..24253892cab19 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -623,3 +623,59 @@ func Test_MaxQuerySize_MaxLookBackPeriod(t *testing.T) { }) } } + +func TestAcquireWithTiming(t *testing.T) { + + ctx := context.Background() + sem := NewSemaphoreWithTiming(2) + + // Channel to collect waiting times + waitingTimes := make(chan struct { + GoroutineID int + WaitingTime int64 + }, 3) + + tryAcquire := func(n int64, goroutineID int) { + elapsed, err := sem.Acquire(ctx, n) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + waitingTimes <- struct { + GoroutineID int + WaitingTime int64 + }{goroutineID, elapsed.Milliseconds()} + + defer sem.sem.Release(n) + + time.Sleep(10 * time.Millisecond) + } + + go tryAcquire(1, 1) + go tryAcquire(1, 2) + + // Sleep briefly to allow the first two goroutines to start running + time.Sleep(5 * time.Millisecond) + + go tryAcquire(1, 3) + + // Collect and sort waiting times + var waitingDurations []struct { + GoroutineID int + WaitingTime int64 + } + for i := 0; i < 3; i++ { + waitingDurations = append(waitingDurations, <-waitingTimes) + } + // Find and check the waiting time for the third goroutine + var waiting3 int64 + for _, waiting := range waitingDurations { + if waiting.GoroutineID == 3 { + waiting3 = waiting.WaitingTime + break + } + } + + // Check that the waiting time for the third request is larger than 0 milliseconds and less than or equal to 10-5=5 milliseconds + require.Greater(t, waiting3, 0*time.Millisecond) + 
require.LessOrEqual(t, waiting3, (5 * time.Millisecond).Milliseconds()) +} From 0695424f7dd62435df3a9981276b40f3c5ef5641 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 26 Oct 2023 10:30:31 +0200 Subject: [PATCH 23/33] fix(deps): update module google.golang.org/grpc [security] (main) (#11031) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [google.golang.org/grpc](https://togithub.com/grpc/grpc-go) | require | minor | `v1.53.0` -> `v1.56.3` | | [google.golang.org/grpc](https://togithub.com/grpc/grpc-go) | require | patch | `v1.58.2` -> `v1.58.3` | ### GitHub Vulnerability Alerts #### [GHSA-m425-mq94-257g](https://togithub.com/grpc/grpc-go/security/advisories/GHSA-m425-mq94-257g) ### Impact In affected releases of gRPC-Go, it is possible for an attacker to send HTTP/2 requests, cancel them, and send subsequent requests, which is valid by the HTTP/2 protocol, but would cause the gRPC-Go server to launch more concurrent method handlers than the configured maximum stream limit. ### Patches This vulnerability was addressed by #6703 and has been included in patch releases: 1.56.3, 1.57.1, 1.58.3. It is also included in the latest release, 1.59.0. Along with applying the patch, users should also ensure they are using the `grpc.MaxConcurrentStreams` server option to apply a limit to the server's resources used for any single connection. ### Workarounds None. ### References #6703 --- ### Release Notes
grpc/grpc-go (google.golang.org/grpc) ### [`v1.56.3`](https://togithub.com/grpc/grpc-go/releases/tag/v1.56.3): Release 1.56.3 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.56.2...v1.56.3) ### Security - server: prohibit more than MaxConcurrentStreams handlers from running at once (CVE-2023-44487) In addition to this change, applications should ensure they do not leave running tasks behind related to the RPC before returning from method handlers, or should enforce appropriate limits on any such work. ### [`v1.56.2`](https://togithub.com/grpc/grpc-go/releases/tag/v1.56.2): Release 1.56.2 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.56.1...v1.56.2) - status: To fix a panic, `status.FromError` now returns an error with `codes.Unknown` when the error implements the `GRPCStatus()` method, and calling `GRPCStatus()` returns `nil`. ([#​6374](https://togithub.com/grpc/grpc-go/issues/6374)) ### [`v1.56.1`](https://togithub.com/grpc/grpc-go/releases/tag/v1.56.1): Release 1.56.1 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.56.0...v1.56.1) - client: handle empty address lists correctly in addrConn.updateAddrs ### [`v1.56.0`](https://togithub.com/grpc/grpc-go/releases/tag/v1.56.0): Release 1.56.0 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.55.1...v1.56.0) ### New Features - client: support channel idleness using `WithIdleTimeout` dial option ([#​6263](https://togithub.com/grpc/grpc-go/issues/6263)) - This feature is currently disabled by default, but will be enabled with a 30 minute default in the future. - client: when using pickfirst, keep channel state in TRANSIENT_FAILURE until it becomes READY ([gRFC A62](https://togithub.com/grpc/proposal/blob/master/A62-pick-first.md)) ([#​6306](https://togithub.com/grpc/grpc-go/issues/6306)) - xds: Add support for Custom LB Policies ([gRFC A52](https://togithub.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md)) ([#​6224](https://togithub.com/grpc/grpc-go/issues/6224)) - xds: support pick_first Custom LB policy ([gRFC A62](https://togithub.com/grpc/proposal/blob/master/A62-pick-first.md)) ([#​6314](https://togithub.com/grpc/grpc-go/issues/6314)) ([#​6317](https://togithub.com/grpc/grpc-go/issues/6317)) - client: add support for pickfirst address shuffling ([gRFC A62](https://togithub.com/grpc/proposal/blob/master/A62-pick-first.md)) ([#​6311](https://togithub.com/grpc/grpc-go/issues/6311)) - xds: Add support for String Matcher Header Matcher in RDS ([#​6313](https://togithub.com/grpc/grpc-go/issues/6313)) - xds/outlierdetection: Add Channelz Logger to Outlier Detection LB ([#​6145](https://togithub.com/grpc/grpc-go/issues/6145)) - Special Thanks: [@​s-matyukevich](https://togithub.com/s-matyukevich) - xds: enable RLS in xDS by default ([#​6343](https://togithub.com/grpc/grpc-go/issues/6343)) - orca: add support for application_utilization field and missing range checks on several metrics setters - balancer/weightedroundrobin: add new LB policy for balancing between backends based on their load reports ([gRFC A58](https://togithub.com/grpc/proposal/blob/master/A58-client-side-weighted-round-robin-lb-policy.md)) ([#​6241](https://togithub.com/grpc/grpc-go/issues/6241)) - authz: add conversion of json to RBAC Audit Logging config ([#​6192](https://togithub.com/grpc/grpc-go/issues/6192)) - authz: add support for stdout logger ([#​6230](https://togithub.com/grpc/grpc-go/issues/6230) and [#​6298](https://togithub.com/grpc/grpc-go/issues/6298)) - authz: support customizable audit 
functionality for authorization policy ([#​6192](https://togithub.com/grpc/grpc-go/issues/6192) [#​6230](https://togithub.com/grpc/grpc-go/issues/6230) [#​6298](https://togithub.com/grpc/grpc-go/issues/6298) [#​6158](https://togithub.com/grpc/grpc-go/issues/6158) [#​6304](https://togithub.com/grpc/grpc-go/issues/6304) and [#​6225](https://togithub.com/grpc/grpc-go/issues/6225)) ### Bug Fixes - orca: fix a race at startup of out-of-band metric subscriptions that would cause the report interval to request 0 ([#​6245](https://togithub.com/grpc/grpc-go/issues/6245)) - xds/xdsresource: Fix Outlier Detection Config Handling and correctly set xDS Defaults ([#​6361](https://togithub.com/grpc/grpc-go/issues/6361)) - xds/outlierdetection: Fix Outlier Detection Config Handling by setting defaults in ParseConfig() ([#​6361](https://togithub.com/grpc/grpc-go/issues/6361)) ### API Changes - orca: allow a ServerMetricsProvider to be passed to the ORCA service and ServerOption ([#​6223](https://togithub.com/grpc/grpc-go/issues/6223)) ### [`v1.55.1`](https://togithub.com/grpc/grpc-go/releases/tag/v1.55.1): Release 1.55.1 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.55.0...v1.55.1) - status: To fix a panic, `status.FromError` now returns an error with `codes.Unknown` when the error implements the `GRPCStatus()` method, and calling `GRPCStatus()` returns `nil`. ([#​6374](https://togithub.com/grpc/grpc-go/issues/6374)) ### [`v1.55.0`](https://togithub.com/grpc/grpc-go/releases/tag/v1.55.0): Release 1.55.0 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.54.1...v1.55.0) ### Behavior Changes - xds: enable federation support by default ([#​6151](https://togithub.com/grpc/grpc-go/issues/6151)) - status: `status.Code` and `status.FromError` handle wrapped errors ([#​6031](https://togithub.com/grpc/grpc-go/issues/6031) and [#​6150](https://togithub.com/grpc/grpc-go/issues/6150)) - Special Thanks: [@​psyhatter](https://togithub.com/psyhatter) ### New Features - xds/xdsclient: support `ignore_resource_deletion` server feature as per gRFC [A53](https://togithub.com/grpc/proposal/blob/master/A53-xds-ignore-resource-deletion.md) ([#​6035](https://togithub.com/grpc/grpc-go/issues/6035)) - security/advancedtls: add min/max TLS version selection options ([#​6007](https://togithub.com/grpc/grpc-go/issues/6007)) - Special Thanks: [@​joeljeske](https://togithub.com/joeljeske) ### Bug Fixes - xds: stop routing RPCs to deleted clusters ([#​6125](https://togithub.com/grpc/grpc-go/issues/6125)) - client: fix race between stream creation and GOAWAY receipt, which could lead to spurious UNAVAILABLE stream errors ([#​6142](https://togithub.com/grpc/grpc-go/issues/6142)) ### Performance Improvements - server: improve stream handler goroutine worker allocation when [`NumStreamWorkers`](https://pkg.go.dev/google.golang.org/grpc#NumStreamWorkers) is used ([#​6004](https://togithub.com/grpc/grpc-go/issues/6004)) - Special Thanks: [@​SaveTheRbtz](https://togithub.com/SaveTheRbtz) ### [`v1.54.1`](https://togithub.com/grpc/grpc-go/releases/tag/v1.54.1): Release 1.54.1 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.54.0...v1.54.1) ### Bug Fixes - credentials/alts: revert a change that causes a crash in the handshaker ### [`v1.54.0`](https://togithub.com/grpc/grpc-go/releases/tag/v1.54.0): Release 1.54.0 [Compare Source](https://togithub.com/grpc/grpc-go/compare/v1.53.0...v1.54.0) ### Behavior Changes - xds: remove support for xDS v2 transport API 
([#​6013](https://togithub.com/grpc/grpc-go/issues/6013)) ### New Features - server: expose `SetSendCompressor` API to set send compressor name ([#​5744](https://togithub.com/grpc/grpc-go/issues/5744)) - Special Thanks: [@​jronak](https://togithub.com/jronak) - xdsclient: include `Node` proto only in the first discovery request message, to improve performance ([#​6078](https://togithub.com/grpc/grpc-go/issues/6078)) ### Bug Fixes - metadata: fix validation logic and properly validate metadata appended via `AppendToOutgoingContext` ([#​6001](https://togithub.com/grpc/grpc-go/issues/6001)) - Special Thanks: [@​ktalg](https://togithub.com/ktalg) - transport: do not close connections when we encounter I/O errors until after all data is consumed ([#​6110](https://togithub.com/grpc/grpc-go/issues/6110)) - ringhash: ensure addresses are consistently hashed across updates ([#​6066](https://togithub.com/grpc/grpc-go/issues/6066)) - xds/clusterimpl: fix a bug causing unnecessary closing and re-opening of LRS streams ([#​6112](https://togithub.com/grpc/grpc-go/issues/6112)) - xds: NACK route configuration if sum of weights of weighted clusters exceeds uint32\_max ([#​6085](https://togithub.com/grpc/grpc-go/issues/6085)) ### Documentation - resolver: update `Resolver.Scheme()` docstring to mention requirement of lowercase scheme names ([#​6014](https://togithub.com/grpc/grpc-go/issues/6014)) - resolver: document expected error handling of `UpdateState` errors ([#​6002](https://togithub.com/grpc/grpc-go/issues/6002)) - Special Thanks: [@​fho](https://togithub.com/fho) - examples: add example for ORCA load reporting ([#​6114](https://togithub.com/grpc/grpc-go/issues/6114)) - examples: add an example to illustrate authorization (authz) support ([#​5920](https://togithub.com/grpc/grpc-go/issues/5920)) - Special Thanks: [@​KenxinKun](https://togithub.com/KenxinKun)
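Given the advisory above explicitly recommends pairing the patch with `grpc.MaxConcurrentStreams`, here is a minimal sketch of that mitigation (illustrative only, not Loki's actual server wiring; the port and the limit of 100 are made-up values):

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":9095") // port chosen for illustration
	if err != nil {
		log.Fatal(err)
	}
	// Cap concurrent streams per connection. With the patched server below,
	// this also bounds how many handler goroutines one connection can hold.
	srv := grpc.NewServer(grpc.MaxConcurrentStreams(100))
	// ...register services here...
	log.Fatal(srv.Serve(lis))
}
```

Note in the vendored `server.go` hunks below that the patched `serveStreams` enforces this cap through `newHandlerQuota` even when handlers run on the worker-goroutine pool.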
--- ### Configuration 📅 **Schedule**: Branch creation - "" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://togithub.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/grafana/loki). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +- pkg/push/go.mod | 6 +- pkg/push/go.sum | 18 ++--- .../grpc/internal/transport/http2_server.go | 11 +-- vendor/google.golang.org/grpc/server.go | 71 +++++++++++++------ vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 2 +- 8 files changed, 67 insertions(+), 49 deletions(-) diff --git a/go.mod b/go.mod index 77228a1eaa12d..ddf8b367742f5 100644 --- a/go.mod +++ b/go.mod @@ -104,7 +104,7 @@ require ( golang.org/x/sys v0.13.0 golang.org/x/time v0.3.0 google.golang.org/api v0.132.0 - google.golang.org/grpc v1.58.2 + google.golang.org/grpc v1.58.3 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 diff --git a/go.sum b/go.sum index 74f9d98ad4fc1..a141a9213dd6a 100644 --- a/go.sum +++ b/go.sum @@ -2518,8 +2518,8 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= -google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/pkg/push/go.mod b/pkg/push/go.mod index 202c37781699e..f0eaf486ab343 100644 --- a/pkg/push/go.mod +++ b/pkg/push/go.mod @@ -6,7 +6,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.8.2 golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b - google.golang.org/grpc v1.53.0 + google.golang.org/grpc v1.56.3 ) require ( @@ -17,8 +17,8 @@ require ( golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/protobuf v1.29.1 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/push/go.sum b/pkg/push/go.sum index 2abbf99450f81..d2d95b4bd9320 100644 --- 
a/pkg/push/go.sum +++ b/pkg/push/go.sum @@ -39,8 +39,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -49,14 +47,10 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -67,14 +61,14 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 8d3a353c1d581..c06db679d89cc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -171,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -258,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: maxStreams, + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 244123c6c5a89..eeae92fbe0204 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -115,12 +115,6 @@ type serviceInfo struct { mdata any } -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream -} - // Server is a gRPC server to serve RPC requests. type Server struct { opts serverOptions @@ -145,7 +139,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan *serverWorkerData + serverWorkerChannel chan func() } type serverOptions struct { @@ -179,6 +173,7 @@ type serverOptions struct { } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, @@ -404,6 +399,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. 
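+// As of this change, a value of 0 is normalized to math.MaxUint32 here,
+// matching the new maxConcurrentStreams default in serverOptions above, so
+// the transport no longer needs to special-case an unset limit.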
func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -605,24 +603,19 @@ const serverWorkerResetThreshold = 1 << 16 // [1] https://github.com/golang/go/issues/18138 func (s *Server) serverWorker() { for completed := 0; completed < serverWorkerResetThreshold; completed++ { - data, ok := <-s.serverWorkerChannel + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleSingleStream(data) + f() } go s.serverWorker() } -func (s *Server) handleSingleStream(data *serverWorkerData) { - defer data.wg.Done() - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) -} - // initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan *serverWorkerData) + s.serverWorkerChannel = make(chan func()) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } @@ -982,21 +975,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) + + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + } + if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannel <- data: + case s.serverWorkerChannel <- f: return default: // If all stream workers are busy, fallback to the default code path. } } - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() + go f() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -2091,3 +2089,34 @@ func validateSendCompressor(name, clientCompressors string) error { } return fmt.Errorf("client does not support compressor %q", name) } + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index d3f5bcbfcef8b..724ad21021300 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.58.2" +const Version = "1.58.3" diff --git a/vendor/modules.txt b/vendor/modules.txt index 24ea181a4b1b9..8e76eb21adbf8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1747,7 +1747,7 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.58.2 +# google.golang.org/grpc v1.58.3 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes From c66ffd125cd89f5845a75a1751186fa46d003f70 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 26 Oct 2023 10:33:30 +0200 Subject: [PATCH 24/33] fix(deps): update github.com/c2h5oh/datasize digest to 859f65c (main) (#10820) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [github.com/c2h5oh/datasize](https://togithub.com/c2h5oh/datasize) | require | digest | `28bbd47` -> `859f65c` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/grafana/loki). 
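Beyond re-pinning the digest, the vendored hunks below pick up the upstream `Parse`/`ParseString` helpers added to `datasize.go`. A minimal sketch of the convenience they add (canonical import path shown; Loki vendors the mirrored `github.com` form):

```go
package main

import (
	"fmt"

	"github.com/c2h5oh/datasize"
)

func main() {
	// ParseString wraps ByteSize.UnmarshalText, so callers no longer need to
	// declare a ByteSize variable just to parse a human-readable size.
	v, err := datasize.ParseString("16MB")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(v.Bytes()) // 16777216, since datasize uses binary (1024-based) units
}
```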
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/c2h5oh/datasize/README.md | 2 +- vendor/github.com/c2h5oh/datasize/datasize.go | 22 +++++++++++++++++++ vendor/modules.txt | 2 +- 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index ddf8b367742f5..efad30c6ba83b 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/baidubce/bce-sdk-go v0.9.141 github.com/bmatcuk/doublestar v1.3.4 github.com/buger/jsonparser v1.1.1 - github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee + github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash/v2 v2.2.0 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf diff --git a/go.sum b/go.sum index a141a9213dd6a..465a01a6ac13a 100644 --- a/go.sum +++ b/go.sum @@ -387,8 +387,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I= -github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM= github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= diff --git a/vendor/github.com/c2h5oh/datasize/README.md b/vendor/github.com/c2h5oh/datasize/README.md index ac0cf8586e9f2..f6e828587f007 100644 --- a/vendor/github.com/c2h5oh/datasize/README.md +++ b/vendor/github.com/c2h5oh/datasize/README.md @@ -19,7 +19,7 @@ Just like `time` package provides `time.Second`, `time.Day` constants `datasize` Just like `time` package provides `duration.Nanoseconds() uint64 `, `duration.Hours() float64` helpers `datasize` has. 
* `ByteSize.Bytes() uint64` -* `ByteSize.Kilobytes() float4` +* `ByteSize.Kilobytes() float64` * `ByteSize.Megabytes() float64` * `ByteSize.Gigabytes() float64` * `ByteSize.Terabytes() float64` diff --git a/vendor/github.com/c2h5oh/datasize/datasize.go b/vendor/github.com/c2h5oh/datasize/datasize.go index 6754788162496..2ce762751c08f 100644 --- a/vendor/github.com/c2h5oh/datasize/datasize.go +++ b/vendor/github.com/c2h5oh/datasize/datasize.go @@ -215,3 +215,25 @@ BitsError: *b = 0 return &strconv.NumError{fnUnmarshalText, string(t0), ErrBits} } + +func Parse(t []byte) (ByteSize, error) { + var v ByteSize + err := v.UnmarshalText(t) + return v, err +} + +func MustParse(t []byte) ByteSize { + v, err := Parse(t) + if err != nil { + panic(err) + } + return v +} + +func ParseString(s string) (ByteSize, error) { + return Parse([]byte(s)) +} + +func MustParseString(s string) ByteSize { + return MustParse([]byte(s)) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8e76eb21adbf8..97cf4c4cc6365 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -423,7 +423,7 @@ github.com/bmatcuk/doublestar # github.com/buger/jsonparser v1.1.1 ## explicit; go 1.13 github.com/buger/jsonparser -# github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee +# github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b ## explicit github.com/c2h5oh/datasize # github.com/census-instrumentation/opencensus-proto v0.4.1 From 1fe48858ae15b33646eedb85b05d6773a8bc5020 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 26 Oct 2023 10:38:31 +0200 Subject: [PATCH 25/33] fix(deps): update github.com/joncrlsn/dque digest to c2ef48c (main) (#10947) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [github.com/joncrlsn/dque](https://togithub.com/joncrlsn/dque) | require | digest | `956d141` -> `c2ef48c` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/grafana/loki). 
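The pinned dque commit, shown in the `queue.go` hunks below, makes the blocking reads (`DequeueBlock`/`PeekBlock`) wait on a condition variable bound to the queue's main mutex instead of the old separate `mutexEmptyCond`, and has them call the unexported `dequeueLocked`/`peekLocked` so the emptiness check and the pop happen under one lock. A minimal sketch of that single-mutex pattern (toy types, not dque's actual API):

```go
package main

import (
	"fmt"
	"sync"
)

type blockingQueue struct {
	mu    sync.Mutex
	cond  *sync.Cond // cond.L is &mu, as in the vendored change
	items []string
}

func newBlockingQueue() *blockingQueue {
	q := &blockingQueue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *blockingQueue) Enqueue(s string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, s)
	q.cond.Signal() // wake one blocked consumer
}

func (q *blockingQueue) DequeueBlock() string {
	q.mu.Lock()
	defer q.mu.Unlock()
	// Wait atomically releases mu and re-acquires it on wakeup; the loop
	// re-checks emptiness because another consumer may win the race to
	// re-acquire the lock first.
	for len(q.items) == 0 {
		q.cond.Wait()
	}
	s := q.items[0]
	q.items = q.items[1:]
	return s
}

func main() {
	q := newBlockingQueue()
	go q.Enqueue("hello")
	fmt.Println(q.DequeueBlock())
}
```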
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 5 ++-- vendor/github.com/joncrlsn/dque/queue.go | 35 +++++++++++++++++------- vendor/modules.txt | 4 +-- 4 files changed, 31 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index efad30c6ba83b..fd6104c1c40bb 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,7 @@ require ( github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48 github.com/influxdata/telegraf v1.16.3 github.com/jmespath/go-jmespath v0.4.0 - github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible + github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.16.7 github.com/klauspost/pgzip v1.2.5 diff --git a/go.sum b/go.sum index 465a01a6ac13a..792b91d419e70 100644 --- a/go.sum +++ b/go.sum @@ -802,6 +802,7 @@ github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBT github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -1176,8 +1177,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible h1:f4ZGkY12AQ+YvzWDDWMLMGejA4ceg7nIPlqJ9fQ9T4c= -github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible/go.mod h1:hDZb8oMj3Kp8MxtbNLg9vrtAUDHjgI1yZvqivT4O8Iw= +github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a h1:sfe532Ipn7GX0V6mHdynBk393rDmqgI0QmjLK7ct7TU= +github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a/go.mod h1:dNKs71rs2VJGBAmttu7fouEsRQlRjxy0p1Sx+T5wbpY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= diff --git a/vendor/github.com/joncrlsn/dque/queue.go b/vendor/github.com/joncrlsn/dque/queue.go index 317abc141cff5..fb701cf9c74da 100644 --- a/vendor/github.com/joncrlsn/dque/queue.go +++ b/vendor/github.com/joncrlsn/dque/queue.go @@ -60,8 +60,7 @@ type DQue struct { mutex sync.Mutex - emptyCond *sync.Cond - mutexEmptyCond sync.Mutex + emptyCond *sync.Cond turbo bool } @@ -92,13 +91,17 @@ func New(name string, dirPath string, itemsPerSegment int, builder func() interf q.fullPath = fullPath q.config.ItemsPerSegment = itemsPerSegment 
q.builder = builder - q.emptyCond = sync.NewCond(&q.mutexEmptyCond) + q.emptyCond = sync.NewCond(&q.mutex) if err := q.lock(); err != nil { return nil, err } if err := q.load(); err != nil { + er := q.fileLock.Unlock() + if er != nil { + return nil, er + } return nil, err } @@ -127,13 +130,17 @@ func Open(name string, dirPath string, itemsPerSegment int, builder func() inter q.fullPath = fullPath q.config.ItemsPerSegment = itemsPerSegment q.builder = builder - q.emptyCond = sync.NewCond(&q.mutexEmptyCond) + q.emptyCond = sync.NewCond(&q.mutex) if err := q.lock(); err != nil { return nil, err } if err := q.load(); err != nil { + er := q.fileLock.Unlock() + if er != nil { + return nil, er + } return nil, err } @@ -241,6 +248,10 @@ func (q *DQue) Dequeue() (interface{}, error) { q.mutex.Lock() defer q.mutex.Unlock() + return q.dequeueLocked() +} + +func (q *DQue) dequeueLocked() (interface{}, error) { if q.fileLock == nil { return nil, ErrQueueClosed } @@ -305,6 +316,10 @@ func (q *DQue) Peek() (interface{}, error) { q.mutex.Lock() defer q.mutex.Unlock() + return q.peekLocked() +} + +func (q *DQue) peekLocked() (interface{}, error) { if q.fileLock == nil { return nil, ErrQueueClosed } @@ -324,10 +339,10 @@ func (q *DQue) Peek() (interface{}, error) { // DequeueBlock behaves similar to Dequeue, but is a blocking call until an item is available. func (q *DQue) DequeueBlock() (interface{}, error) { - q.mutexEmptyCond.Lock() - defer q.mutexEmptyCond.Unlock() + q.mutex.Lock() + defer q.mutex.Unlock() for { - obj, err := q.Dequeue() + obj, err := q.dequeueLocked() if err == ErrEmpty { q.emptyCond.Wait() // Wait() atomically unlocks mutexEmptyCond and suspends execution of the calling goroutine. @@ -342,10 +357,10 @@ func (q *DQue) DequeueBlock() (interface{}, error) { // PeekBlock behaves similar to Peek, but is a blocking call until an item is available. func (q *DQue) PeekBlock() (interface{}, error) { - q.mutexEmptyCond.Lock() - defer q.mutexEmptyCond.Unlock() + q.mutex.Lock() + defer q.mutex.Unlock() for { - obj, err := q.Peek() + obj, err := q.peekLocked() if err == ErrEmpty { q.emptyCond.Wait() // Wait() atomically unlocks mutexEmptyCond and suspends execution of the calling goroutine. diff --git a/vendor/modules.txt b/vendor/modules.txt index 97cf4c4cc6365..cafc9755c6684 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1042,8 +1042,8 @@ github.com/jcmturner/rpc/v2/ndr # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath -# github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible -## explicit +# github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a +## explicit; go 1.13 github.com/joncrlsn/dque # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 From 0d7b6b2622f9bcf5d9d151dc3d25a8c97c3f2039 Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Thu, 26 Oct 2023 03:30:00 -0700 Subject: [PATCH 26/33] Return empty responses for logproto request types from (#11029) We need to return empty responses for logproto request types from NewEmptyResponse based on types that can be returned from DecodeRequest. 
--------- Signed-off-by: Callum Styan --- pkg/logproto/compat.go | 4 +++ pkg/querier/queryrange/codec.go | 4 +++ pkg/querier/queryrange/limits_test.go | 43 +++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go index 8e3d24df800f1..a1e598a26cd1e 100644 --- a/pkg/logproto/compat.go +++ b/pkg/logproto/compat.go @@ -276,6 +276,10 @@ func (m *IndexStatsRequest) LogToSpan(sp opentracing.Span) { ) } +func (i *IndexStatsResponse) GetHeaders() []*definitions.PrometheusResponseHeader { + return nil +} + // Satisfy definitions.Request for Volume // GetStart returns the start timestamp of the request in milliseconds. diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index b24a46146b382..9f44e51b79d3b 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -1532,6 +1532,10 @@ func NewEmptyResponse(r queryrangebase.Request) (queryrangebase.Response, error) ResultType: loghttp.ResultTypeStream, }, }, nil + case *logproto.IndexStatsRequest: + return &logproto.IndexStatsResponse{}, nil + case *logproto.VolumeRequest: + return &logproto.VolumeResponse{}, nil default: return nil, fmt.Errorf("unsupported request type %T", req) } diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index 24253892cab19..98941cad8e2d1 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -3,6 +3,7 @@ package queryrange import ( "context" "fmt" + "reflect" "sync" "testing" "time" @@ -255,6 +256,48 @@ func Test_MaxQueryLookBack(t *testing.T) { require.Equal(t, resp.(*LokiResponse).Status, "success") } +func Test_MaxQueryLookBack_Types(t *testing.T) { + m := NewLimitsMiddleware(fakeLimits{ + maxQueryLookback: 1 * time.Hour, + maxQueryParallelism: 1, + }) + + now := time.Now() + type tcase struct { + request base.Request + expectedResponse base.Response + } + cases := []tcase{ + { + request: &logproto.IndexStatsRequest{ + From: model.Time(now.UnixMilli()), + Through: model.Time(now.Add(-90 * time.Minute).UnixMilli()), + }, + expectedResponse: &logproto.IndexStatsResponse{}, + }, + { + request: &logproto.VolumeRequest{ + From: model.Time(now.UnixMilli()), + Through: model.Time(now.Add(-90 * time.Minute).UnixMilli()), + }, + expectedResponse: &logproto.VolumeResponse{}, + }, + } + + ctx := user.InjectOrgID(context.Background(), "1") + + h := base.HandlerFunc(func(context.Context, base.Request) (base.Response, error) { + return nil, nil + }) + + for _, tcase := range cases { + resp, err := m.Wrap(h).Do(ctx, tcase.request) + require.NoError(t, err) + + require.Equal(t, reflect.TypeOf(tcase.expectedResponse), reflect.TypeOf(resp)) + } +} + func Test_GenerateCacheKey_NoDivideZero(t *testing.T) { l := cacheKeyLimits{WithSplitByLimits(nil, 0), nil} start := time.Now() From aedf6dfc217fb71df4e7c873b158319b0b6da27c Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Thu, 26 Oct 2023 14:01:47 +0200 Subject: [PATCH 27/33] Remove `GetHeaders` from `logproto.VolumeResponse`. (#11049) **What this PR does / why we need it**: Replaces https://github.com/grafana/loki/pull/11048/. We would return `logproto.VolumeResponse` but not match it in the codec. 
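Put differently: after #11029, `NewEmptyResponse` could hand back a bare `*logproto.VolumeResponse`, but the frontend codec and merge paths only recognize the wrapper type `*queryrange.VolumeResponse`. The changes below make both paths agree on the wrapper: the querier handler wraps `VolumeHandler`'s result, `NewEmptyResponse` returns `&VolumeResponse{}`, and the now-unneeded `GetHeaders` shim is dropped from the bare protobuf type.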
**Special notes for your reviewer**: **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. --- pkg/logproto/compat.go | 4 ---- pkg/querier/handler.go | 6 +++++- pkg/querier/queryrange/codec.go | 2 +- pkg/querier/queryrange/limits_test.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go index a1e598a26cd1e..fdf6f6b169cd7 100644 --- a/pkg/logproto/compat.go +++ b/pkg/logproto/compat.go @@ -323,7 +323,3 @@ func (m *VolumeRequest) LogToSpan(sp opentracing.Span) { otlog.String("end", timestamp.Time(int64(m.Through)).String()), ) } - -func (*VolumeResponse) GetHeaders() []*definitions.PrometheusResponseHeader { - return nil -} diff --git a/pkg/querier/handler.go b/pkg/querier/handler.go index 033776d461648..47a4c15e07511 100644 --- a/pkg/querier/handler.go +++ b/pkg/querier/handler.go @@ -94,7 +94,11 @@ func (h *Handler) Do(ctx context.Context, req queryrangebase.Request) (queryrang } return &queryrange.IndexStatsResponse{Response: result}, nil case *logproto.VolumeRequest: - return h.api.VolumeHandler(ctx, concrete) + result, err := h.api.VolumeHandler(ctx, concrete) + if err != nil { + return nil, err + } + return &queryrange.VolumeResponse{Response: result}, nil default: return nil, fmt.Errorf("unsupported query type %T", req) } diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 9f44e51b79d3b..27b15d6698aa7 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -1535,7 +1535,7 @@ func NewEmptyResponse(r queryrangebase.Request) (queryrangebase.Response, error) case *logproto.IndexStatsRequest: return &logproto.IndexStatsResponse{}, nil case *logproto.VolumeRequest: - return &logproto.VolumeResponse{}, nil + return &VolumeResponse{}, nil default: return nil, fmt.Errorf("unsupported request type %T", req) } diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index 98941cad8e2d1..a3b14efdbe588 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -280,7 +280,7 @@ func Test_MaxQueryLookBack_Types(t *testing.T) { From: model.Time(now.UnixMilli()), Through: model.Time(now.Add(-90 * time.Minute).UnixMilli()), }, - expectedResponse: &logproto.VolumeResponse{}, + expectedResponse: &VolumeResponse{}, }, } From f8da0db4bc29230c89377c563eb97596bf6ee018 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Thu, 26 Oct 2023 15:26:10 +0200 Subject: [PATCH 28/33] Initialize the request codec in the module chain. 
(#11042) **What this PR does / why we need it**: For enterprise we must be able to override the Codec that's injected into the query frontend and the queriers. **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. --- pkg/loki/loki.go | 10 +++++++++- pkg/loki/modules.go | 14 ++++++++------ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 718b5fd13c195..0e4bc081551d7 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -40,6 +40,7 @@ import ( ingester_client "github.com/grafana/loki/pkg/ingester/client" "github.com/grafana/loki/pkg/loki/common" "github.com/grafana/loki/pkg/lokifrontend" + "github.com/grafana/loki/pkg/lokifrontend/frontend/transport" "github.com/grafana/loki/pkg/querier" "github.com/grafana/loki/pkg/querier/queryrange" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" @@ -279,6 +280,12 @@ type Frontend interface { CheckReady(_ context.Context) error } +// Codec defines methods to encode and decode requests from HTTP, httpgrpc and Protobuf. +type Codec interface { + transport.Codec + worker.GRPCCodec +} + // Loki is the root datastructure for Loki. type Loki struct { Cfg Config @@ -325,7 +332,7 @@ type Loki struct { HTTPAuthMiddleware middleware.Interface - Codec worker.GRPCCodec + Codec Codec } // New makes a new Loki. @@ -334,6 +341,7 @@ func New(cfg Config) (*Loki, error) { Cfg: cfg, clientMetrics: storage.NewClientMetrics(), deleteClientMetrics: deletion.NewDeleteRequestClientMetrics(prometheus.DefaultRegisterer), + Codec: queryrange.DefaultCodec, } analytics.Edition("oss") loki.setupAuthMiddleware() diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 6913752166d04..db0a8b5a4d7d6 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -120,6 +120,7 @@ const ( Write string = "write" Backend string = "backend" Analytics string = "analytics" + InitCodec string = "init-codec" ) const ( @@ -349,6 +350,12 @@ func (t *Loki) initDistributor() (services.Service, error) { return t.distributor, nil } +// initCodec sets the codec used to encode and decode requests. 
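+// Downstream (e.g. enterprise) builds can replace this module to inject a
+// custom Codec; the open-source default remains queryrange.DefaultCodec.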
+func (t *Loki) initCodec() (services.Service, error) { + t.Codec = queryrange.DefaultCodec + return nil, nil +} + func (t *Loki) initQuerier() (services.Service, error) { if t.Cfg.Ingester.QueryStoreMaxLookBackPeriod != 0 { t.Cfg.Querier.IngesterQueryStoreMaxLookback = t.Cfg.Ingester.QueryStoreMaxLookBackPeriod @@ -504,11 +511,6 @@ func (t *Loki) initQuerier() (services.Service, error) { t.Server.HTTP.Path("/loki/api/v1/tail").Methods("GET", "POST").Handler(httpMiddleware.Wrap(http.HandlerFunc(t.querierAPI.TailHandler))) t.Server.HTTP.Path("/api/prom/tail").Methods("GET", "POST").Handler(httpMiddleware.Wrap(http.HandlerFunc(t.querierAPI.TailHandler))) - // Default codec - if t.Codec == nil { - t.Codec = queryrange.DefaultCodec - } - svc, err := querier.InitWorkerService( querierWorkerServiceConfig, prometheus.DefaultRegisterer, @@ -872,7 +874,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { t.Cfg.Server.GRPCListenPort, util_log.Logger, prometheus.DefaultRegisterer, - queryrange.DefaultCodec, + t.Codec, ) if err != nil { return nil, err } From a550b767d3c8c132362165d6544a09907e613854 Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Thu, 26 Oct 2023 15:59:16 +0200 Subject: [PATCH 29/33] config: Remove already deprecated `store.max-look-back-period`. (#11038) This has already been deprecated for a long time. Decided to remove it in the next version of Loki. Signed-off-by: Kaviraj --- CHANGELOG.md | 1 + docs/sources/configure/_index.md | 4 ---- docs/sources/setup/upgrade/_index.md | 5 +++-- pkg/loki/loki.go | 5 ----- pkg/storage/config/store.go | 12 ------------ .../checker/checker_test.go | 2 +- tools/deprecated-config-checker/deleted-config.yaml | 3 +++ .../deprecated-config-checker/deprecated-config.yaml | 1 - .../test-fixtures/config.yaml | 2 +- 9 files changed, 9 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c62cf0894d703..3e40e765342d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ ##### Enhancements +* [11038](https://github.com/grafana/loki/pull/11038) **kavirajk**: Remove already deprecated `store.max-look-back-period`. * [10906](https://github.com/grafana/loki/pull/10906) **kavirajk**: Support Loki ruler to notify WAL writes to remote storage. * [10613](https://github.com/grafana/loki/pull/10613) **ngc4579**: Helm: allow GrafanaAgent tolerations * [10295](https://github.com/grafana/loki/pull/10295) **changhyuni**: Storage: remove signatureversionv2 from s3. diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index b68bba15c8d7e..f32fc97d494cf 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -2272,10 +2272,6 @@ The `chunk_store_config` block configures how chunks will be cached and how long # Cache index entries older than this period. 0 to disable. # CLI flag: -store.cache-lookups-older-than [cache_lookups_older_than: | default = 0s] - -# This flag is deprecated. Use -querier.max-query-lookback instead. -# CLI flag: -store.max-look-back-period -[max_look_back_period: | default = 0s] ``` ### schema_config diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md index 969465441cbd2..3d9dc1ac941de 100644 --- a/docs/sources/setup/upgrade/_index.md +++ b/docs/sources/setup/upgrade/_index.md @@ -47,9 +47,10 @@ The previous default value `false` is applied. #### Deprecated configuration options are removed -1. 
Removes already deprecated `-querier.engine.timeout` CLI flag and the corresponding YAML setting. +1. Removed already deprecated `store.max-look-back-period` CLI flag and the corresponding YAML settings. Use `querier.max-query-lookback` config instead. +1. Removes already deprecated `-querier.engine.timeout` CLI flag and the corresponding YAML setting. 1. Also removes the `query_timeout` from the querier YAML section. Instead of configuring `query_timeout` under `querier`, you now configure it in [Limits Config](/docs/loki/latest/configuration/#limits_config). -1. `s3.sse-encryption` is removed. AWS now defaults encryption of all buckets to SSE-S3. Use `sse.type` to set SSE type. +1. `s3.sse-encryption` is removed. AWS now defaults encryption of all buckets to SSE-S3. Use `sse.type` to set SSE type. 1. `ruler.wal-cleaer.period` is removed. Use `ruler.wal-cleaner.period` instead. 1. `experimental.ruler.enable-api` is removed. Use `ruler.enable-api` instead. 1. `split_queries_by_interval` is removed from `query_range` YAML section. You can instead configure it in [Limits Config](/docs/loki/latest/configuration/#limits_config). diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 0e4bc081551d7..7b28d4b5ef41c 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -248,11 +248,6 @@ func (c *Config) Validate() error { if err := c.ChunkStoreConfig.Validate(util_log.Logger); err != nil { return errors.Wrap(err, "invalid chunk store config") } - // TODO(cyriltovena): remove when MaxLookBackPeriod in the storage will be fully deprecated. - if c.ChunkStoreConfig.MaxLookBackPeriod > 0 { - c.LimitsConfig.MaxQueryLookback = c.ChunkStoreConfig.MaxLookBackPeriod - } - if err := c.QueryRange.Validate(); err != nil { return errors.Wrap(err, "invalid query_range config") } diff --git a/pkg/storage/config/store.go b/pkg/storage/config/store.go index 1c93bacf0d08a..9fe276b47614c 100644 --- a/pkg/storage/config/store.go +++ b/pkg/storage/config/store.go @@ -5,8 +5,6 @@ import ( "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/dskit/flagext" "github.com/prometheus/common/model" "github.com/grafana/loki/pkg/storage/chunk/cache" @@ -28,10 +26,6 @@ type ChunkStoreConfig struct { // When DisableIndexDeduplication is true and chunk is already there in cache, only index would be written to the store and not chunk. DisableIndexDeduplication bool `yaml:"-"` - - // Limits query start time to be greater than now() - MaxLookBackPeriod, if set. - // Will be deprecated in the next major release. - MaxLookBackPeriod model.Duration `yaml:"max_look_back_period"` } func (cfg *ChunkStoreConfig) ChunkCacheStubs() bool { @@ -47,14 +41,8 @@ func (cfg *ChunkStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "", f) f.Var(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", "Cache index entries older than this period. 0 to disable.") - f.Var(&cfg.MaxLookBackPeriod, "store.max-look-back-period", "This flag is deprecated. 
Use -querier.max-query-lookback instead.") } func (cfg *ChunkStoreConfig) Validate(logger log.Logger) error { - if cfg.MaxLookBackPeriod > 0 { - flagext.DeprecatedFlagsUsed.Inc() - level.Warn(logger).Log("msg", "running with DEPRECATED flag -store.max-look-back-period, use -querier.max-query-lookback instead.") - } - return nil } diff --git a/tools/deprecated-config-checker/checker/checker_test.go b/tools/deprecated-config-checker/checker/checker_test.go index 929166ed4aa7d..9d93bc84b62a4 100644 --- a/tools/deprecated-config-checker/checker/checker_test.go +++ b/tools/deprecated-config-checker/checker/checker_test.go @@ -26,6 +26,7 @@ var ( "storage_config.boltdb_shipper.use_boltdb_shipper_as_backup", "storage_config.aws.sse_encryption", "storage_config.s3.sse_encryption", + "chunk_store_config.max_look_back_period", } expectedConfigDeprecates = []string{ @@ -38,7 +39,6 @@ var ( "storage_config.grpc_store", "storage_config.aws.dynamodb", "chunk_store_config.write_dedupe_cache_config", - "chunk_store_config.max_look_back_period", "limits_config.unordered_writes", "limits_config.ruler_evaluation_delay_duration", "limits_config.ruler_remote_write_url", diff --git a/tools/deprecated-config-checker/deleted-config.yaml b/tools/deprecated-config-checker/deleted-config.yaml index 9fa53d61cfbd4..b21bc995185d1 100644 --- a/tools/deprecated-config-checker/deleted-config.yaml +++ b/tools/deprecated-config-checker/deleted-config.yaml @@ -31,3 +31,6 @@ storage_config: use_boltdb_shipper_as_backup: "Since TSDB is now stable and the recommended index type, the setting has become irrelevant and therefore was removed. The previous default value false is applied." aws: *s3_deletes s3: *s3_deletes + +chunk_store_config: + max_look_back_period: "Use global or per-tenant max_query_lookback configuration from limits_config." diff --git a/tools/deprecated-config-checker/deprecated-config.yaml b/tools/deprecated-config-checker/deprecated-config.yaml index 873ef9ec76c85..0cd8e8fd8c818 100644 --- a/tools/deprecated-config-checker/deprecated-config.yaml +++ b/tools/deprecated-config-checker/deprecated-config.yaml @@ -44,7 +44,6 @@ storage_config: chunk_store_config: write_dedupe_cache_config: "Write dedupe cache is deprecated along with deprecated index types. Consider using TSDB index which does not require a write dedupe cache." - max_look_back_period: "Use global or per-tenant max_query_lookback configuration from limits_config." ## NOTE: This will also be used to validate per-tenant overrides. 
limits_config: diff --git a/tools/deprecated-config-checker/test-fixtures/config.yaml b/tools/deprecated-config-checker/test-fixtures/config.yaml index eaa713ff23e25..2600c63034ea4 100644 --- a/tools/deprecated-config-checker/test-fixtures/config.yaml +++ b/tools/deprecated-config-checker/test-fixtures/config.yaml @@ -47,7 +47,7 @@ chunk_store_config: cache_lookups_older_than: 1h write_dedupe_cache_config: # DEPRECATED default_validity: 30m - max_look_back_period: 1m # DEPRECATED + max_look_back_period: 1m # DELETED ruler: flush_period: 1s From 54320f27e3a046fa6e952c26174a37b9d31fd080 Mon Sep 17 00:00:00 2001 From: steve-caron-grafana <102198987+steve-caron-grafana@users.noreply.github.com> Date: Thu, 26 Oct 2023 12:31:40 -0400 Subject: [PATCH 30/33] Update _index.md (#11052) Changed the title from "Prometheus pipeline stages" to "Promtail pipeline stages" **What this PR does / why we need it**: **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. --------- Co-authored-by: J Stickler --- docs/sources/send-data/promtail/stages/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/send-data/promtail/stages/_index.md b/docs/sources/send-data/promtail/stages/_index.md index 32689e39d4d32..1530fedd4ad1b 100644 --- a/docs/sources/send-data/promtail/stages/_index.md +++ b/docs/sources/send-data/promtail/stages/_index.md @@ -1,5 +1,5 @@ --- -title: Prometheus pipeline stages +title: Promtail pipeline stages menuTitle: Pipeline stages description: Overview of the Promtail pipeline stages. aliases: @@ -7,7 +7,7 @@ aliases: weight: 700 --- -# Prometheus pipeline stages +# Promtail pipeline stages This section is a collection of all stages Promtail supports in a [Pipeline]({{< relref "../pipelines" >}}). From c7eb757ed8d42dbda3207ff843243e43de3dfeb5 Mon Sep 17 00:00:00 2001 From: Paul Rogers <129207811+paul1r@users.noreply.github.com> Date: Thu, 26 Oct 2023 15:08:44 -0400 Subject: [PATCH 31/33] Initial loki bloom creation library (#10957) **What this PR does / why we need it**: Optimization and benchmarking of various LRUs that sit in front of the bloom filters. 
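In rough terms, the pattern being benchmarked is sketched below (a minimal illustration, not the final API: `populate` is a hypothetical helper, while `Tokenizer`, `LRUCache4` and `filter.ScalableBloomFilter` are the types exercised by this change). The LRU short-circuits `TestAndAdd` for recently seen tokens, which pays off because adjacent log lines tend to share most of their n-grams:

```go
// Hypothetical sketch: LRU-fronted population of a scalable bloom filter.
func populate(lines []string, tokenizer Tokenizer, cache *LRUCache4, sbf *filter.ScalableBloomFilter) {
	for _, line := range lines {
		for _, token := range tokenizer.Tokens(line) {
			if !cache.Get(token.Key) { // token not seen recently
				cache.Put(token.Key)
				sbf.TestAndAdd(token.Key) // only novel tokens touch the filter
			}
		}
	}
}
```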
Initial creation of a library that can be used to create blooms from chunks.

**Which issue(s) this PR fixes**:
Fixes #

**Special notes for your reviewer**:

**Checklist**
- [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**)
- [ ] Documentation added
- [ ] Tests updated
- [ ] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)

---------

Co-authored-by: Owen Diehl
---
 pkg/storage/bloom/v1/bloom_tokenizer.go      | 111 ++++
 pkg/storage/bloom/v1/bloom_tokenizer_test.go |  30 ++
 pkg/storage/bloom/v1/tokenizer.go            | 162 ++++++
 .../storage/bloom/v1}/tokenizer_test.go      | 244 ++++++---
 tools/tsdb/bloom-tester/lib.go               | 121 ++---
 tools/tsdb/bloom-tester/lib_test.go          | 486 +++++++++---------
 tools/tsdb/bloom-tester/lrucache.go          | 339 +++++++++++-
 tools/tsdb/bloom-tester/lrucache_test.go     | 206 ++++++++
 tools/tsdb/bloom-tester/metrics.go           |   5 +-
 tools/tsdb/bloom-tester/readlib.go           |  15 +-
 tools/tsdb/bloom-tester/tokenizer.go         | 255 ---------
 11 files changed, 1298 insertions(+), 676 deletions(-)
 create mode 100644 pkg/storage/bloom/v1/bloom_tokenizer.go
 create mode 100644 pkg/storage/bloom/v1/bloom_tokenizer_test.go
 create mode 100644 pkg/storage/bloom/v1/tokenizer.go
 rename {tools/tsdb/bloom-tester => pkg/storage/bloom/v1}/tokenizer_test.go (52%)
 create mode 100644 tools/tsdb/bloom-tester/lrucache_test.go
 delete mode 100644 tools/tsdb/bloom-tester/tokenizer.go

diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go
new file mode 100644
index 0000000000000..7060052438190
--- /dev/null
+++ b/pkg/storage/bloom/v1/bloom_tokenizer.go
@@ -0,0 +1,111 @@
+package v1
+
+import (
+	"context"
+	"math"
+	"time"
+
+	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/grafana/loki/pkg/chunkenc"
+	"github.com/grafana/loki/pkg/logproto"
+	"github.com/grafana/loki/pkg/logql/log"
+	"github.com/grafana/loki/pkg/storage/bloom/v1/filter"
+	"github.com/grafana/loki/pkg/storage/chunk"
+	util_log "github.com/grafana/loki/pkg/util/log"
+	//"github.com/grafana/loki/tools/tsdb/helpers"
+)
+
+type metrics struct{}
+
+/*
+BloomTokenizer is a utility that converts either Loki chunks or individual lines into tokens.
+These tokens are n-grams, representing adjacent letters, that are used to populate a bloom filter.
+https://en.wikipedia.org/wiki/Bloom_filter
+Bloom filters are utilized for faster lookups of log lines.
+*/
+type BloomTokenizer struct {
+	metrics *metrics
+
+	lineTokenizer    Tokenizer
+	chunkIDTokenizer *WrappedTokenizer
+	cache            map[string]interface{}
+}
+
+const CacheSize = 150000
+
+// NewBloomTokenizer returns a new instance of the Bloom Tokenizer.
+// Warning: the tokens returned use the same byte slice to reduce allocations. This has three consequences:
+// 1) The token slices generated must not be mutated externally
+// 2) The token slice must not be used after the next call to `Tokens()` as it will repopulate the slice.
+// 3) This is not thread safe.
+func NewBloomTokenizer(reg prometheus.Registerer) (*BloomTokenizer, error) {
+	t := &BloomTokenizer{
+		metrics: newMetrics(reg),
+	}
+	t.cache = make(map[string]interface{}, CacheSize)
+	t.lineTokenizer = NewNGramTokenizer(4, 5, 0) // default to 4-grams, no skip
+	t.chunkIDTokenizer = ChunkIDTokenizer(t.lineTokenizer)
+
+	level.Info(util_log.Logger).Log("bloom tokenizer created")
+
+	return t, nil
+}
+
+func (bt *BloomTokenizer) SetLineTokenizer(t Tokenizer) {
+	bt.lineTokenizer = t
+	bt.chunkIDTokenizer = ChunkIDTokenizer(bt.lineTokenizer)
+}
+
+// TODO: Something real here with metrics
+func newMetrics(r prometheus.Registerer) *metrics {
+	return &metrics{}
+}
+
+func clearCache(cache map[string]interface{}) {
+	for k := range cache {
+		delete(cache, k)
+	}
+}
+
+func (bt *BloomTokenizer) PopulateSBF(sbf *filter.ScalableBloomFilter, chunks []chunk.Chunk) {
+	clearCache(bt.cache)
+	for idx := range chunks {
+		lc := chunks[idx].Data.(*chunkenc.Facade).LokiChunk()
+		bt.chunkIDTokenizer.Reinit(chunks[idx].ChunkRef)
+
+		// TODO: error handling
+		itr, _ := lc.Iterator(
+			context.Background(),
+			time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps?
+			time.Unix(0, math.MaxInt64),
+			logproto.FORWARD,
+			log.NewNoopPipeline().ForStream(chunks[idx].Metric),
+		)
+
+		for itr.Next() && itr.Error() == nil {
+			toks := bt.chunkIDTokenizer.Tokens(itr.Entry().Line)
+
+			for _, tok := range toks {
+				if tok.Key != nil {
+					str := string(tok.Key)
+					_, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters
+					if !found {
+						bt.cache[str] = nil
+
+						sbf.TestAndAdd(tok.Key)
+
+						if len(bt.cache) > 150000 { // While crude, this has proven efficient in performance testing. 
This speaks to the similarity in log lines near each other + clearCache(bt.cache) + } + } + } + } + } + } // for each chunk +} + +func (bt *BloomTokenizer) TokenizeLine(line string) []Token { + return bt.lineTokenizer.Tokens(line) +} diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go new file mode 100644 index 0000000000000..eaff6c783771b --- /dev/null +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -0,0 +1,30 @@ +package v1 + +import ( + "fmt" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +func BenchmarkMapClear(b *testing.B) { + bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) + for i := 0; i < b.N; i++ { + for k := 0; k < CacheSize; k++ { + bt.cache[fmt.Sprint(k)] = k + } + + clearCache(bt.cache) + } +} + +func BenchmarkNewMap(b *testing.B) { + bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) + for i := 0; i < b.N; i++ { + for k := 0; k < CacheSize; k++ { + bt.cache[fmt.Sprint(k)] = k + } + + bt.cache = make(map[string]interface{}, CacheSize) + } +} diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go new file mode 100644 index 0000000000000..22da439f07f42 --- /dev/null +++ b/pkg/storage/bloom/v1/tokenizer.go @@ -0,0 +1,162 @@ +package v1 + +import ( + "encoding/binary" + "unicode/utf8" + + "github.com/grafana/loki/pkg/logproto" +) + +type Token struct { + Key []byte +} + +type Tokenizer interface { + Tokens(line string) []Token + GetSkip() int + GetMin() int + GetMax() int +} + +const TokenBufferSize = 4096 +const TokenKeySize = 132 + +type NgramTokenizer struct { + // [min,max) exclusivity + min, max, skip int + buffers [][]rune // circular buffers used for ngram generation + runeBuffer []byte // buffer used for token generation + internalTokenBuffer []Token // circular buffer for tokens +} + +/* +N-Grams (https://en.wikipedia.org/wiki/N-gram) are a series of 'n' adjacent characters in a string. +These will be utilized for the bloom filters to allow for fuzzy searching. 
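+
+For example (mirroring the unit tests below): NewNGramTokenizer(2, 4, 0) applied to
+"abcd" yields the tokens "ab", "bc", "abc", "cd", "bcd", while the skipping variant
+NewNGramTokenizer(2, 3, 1) on the same input yields only "ab" and "cd".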
+*/ +func NewNGramTokenizer(min, max, skip int) *NgramTokenizer { + capacity := max - min + t := &NgramTokenizer{ + min: min, + max: max, + skip: skip, + buffers: make([][]rune, capacity), + runeBuffer: make([]byte, 0, max*4), + internalTokenBuffer: make([]Token, 0, TokenBufferSize), + } + + for i := range t.buffers { + t.buffers[i] = make([]rune, t.min+i) + } + + for i := 0; i < cap(t.internalTokenBuffer); i++ { + t.internalTokenBuffer = append(t.internalTokenBuffer, Token{Key: make([]byte, 0, TokenKeySize)}) + } + + return t +} + +func (t *NgramTokenizer) GetSkip() int { + return t.skip +} + +func (t *NgramTokenizer) GetMin() int { + return t.min +} + +func (t *NgramTokenizer) GetMax() int { + return t.max +} + +func (t *NgramTokenizer) Tokens(line string) []Token { + var i int // rune index (not position that is measured in the range loop) + numToks := 0 + for _, r := range line { + + // j is the index of the buffer to use + for j := 0; j < (t.max - t.min); j++ { + // n is the length of the ngram + n := j + t.min + // pos is the position in the buffer to overwrite + pos := i % n + t.buffers[j][pos] = r + + if i >= n-1 && (i+1-n)%(t.skip+1) == 0 { + t.runeBuffer = reassemble(t.buffers[j], (i+1)%n, t.runeBuffer) + if numToks >= cap(t.internalTokenBuffer) || numToks == len(t.internalTokenBuffer) { + t.internalTokenBuffer = append(t.internalTokenBuffer, Token{Key: make([]byte, 0, TokenKeySize)}) + } + t.internalTokenBuffer[numToks].Key = t.internalTokenBuffer[numToks].Key[:0] + t.internalTokenBuffer[numToks].Key = append(t.internalTokenBuffer[numToks].Key, t.runeBuffer...) + numToks++ + } + } + i++ + } + return t.internalTokenBuffer[0:numToks] +} + +func reassemble(buf []rune, pos int, result []byte) []byte { + result = result[:0] // Reset the result slice + for i := 0; i < len(buf); i++ { + cur := (pos + i) % len(buf) + result = utf8.AppendRune(result, buf[cur]) + } + return result +} + +func chunkIDTransformer(tok Token, prefix []byte) Token { + tok.Key = append(append(tok.Key, prefix...), tok.Key...)[len(tok.Key):] + return tok +} + +type WrappedTokenizer struct { + t Tokenizer + tokenBuffer []Token + prefix []byte + i64buf []byte + i32buf []byte +} + +func (w *WrappedTokenizer) Tokens(line string) []Token { + w.tokenBuffer = w.tokenBuffer[:0] // Reset the result slice + toks := w.t.Tokens(line) + for _, tok := range toks { + w.tokenBuffer = append(w.tokenBuffer, chunkIDTransformer(tok, w.prefix), tok) + } + + return w.tokenBuffer +} + +func (w *WrappedTokenizer) GetSkip() int { + return w.t.GetSkip() +} + +func (w *WrappedTokenizer) GetMin() int { + return w.t.GetMin() +} + +func (w *WrappedTokenizer) GetMax() int { + return w.t.GetMax() +} + +func ChunkIDTokenizer(t Tokenizer) *WrappedTokenizer { + p := make([]byte, 0, 256) + return &WrappedTokenizer{ + t: t, + tokenBuffer: make([]Token, 0, TokenBufferSize), + prefix: p, + i64buf: make([]byte, binary.MaxVarintLen64), + i32buf: make([]byte, 4), + } +} + +func (w *WrappedTokenizer) Reinit(chk logproto.ChunkRef) { + w.prefix = w.prefix[:0] + + binary.PutVarint(w.i64buf, int64(chk.From)) + w.prefix = append(w.prefix, w.i64buf...) + binary.PutVarint(w.i64buf, int64(chk.Through)) + w.prefix = append(w.prefix, w.i64buf...) + binary.LittleEndian.PutUint32(w.i32buf, chk.Checksum) + w.prefix = append(w.prefix, w.i32buf...) 
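+	// Note: each binary.PutVarint call above fills i64buf, but the whole buffer
+	// (binary.MaxVarintLen64 bytes) is appended, so the prefix is fixed-width:
+	// 2*binary.MaxVarintLen64 + 4 bytes per chunk reference.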
+} diff --git a/tools/tsdb/bloom-tester/tokenizer_test.go b/pkg/storage/bloom/v1/tokenizer_test.go similarity index 52% rename from tools/tsdb/bloom-tester/tokenizer_test.go rename to pkg/storage/bloom/v1/tokenizer_test.go index b0660663e1809..8a2c32d7930d8 100644 --- a/tools/tsdb/bloom-tester/tokenizer_test.go +++ b/pkg/storage/bloom/v1/tokenizer_test.go @@ -1,17 +1,32 @@ -package main +package v1 import ( "bufio" "encoding/binary" - "github.com/grafana/loki/pkg/logproto" "os" "testing" + "github.com/grafana/loki/pkg/logproto" + "github.com/stretchr/testify/require" ) -func TestNGramTokenizer(t *testing.T) { - tokenizer := threeSkip2 +const BigFile = "../../../logql/sketch/testdata/war_peace.txt" + +var ( + twoSkipOne = NewNGramTokenizer(2, 3, 1) + three = NewNGramTokenizer(3, 4, 0) + threeSkip1 = NewNGramTokenizer(3, 4, 1) + threeSkip2 = NewNGramTokenizer(3, 4, 2) + four = NewNGramTokenizer(4, 5, 0) + fourSkip1 = NewNGramTokenizer(4, 5, 1) + fourSkip2 = NewNGramTokenizer(4, 5, 2) + five = NewNGramTokenizer(5, 6, 0) + six = NewNGramTokenizer(6, 7, 0) +) + +func TestNGrams(t *testing.T) { + tokenizer := NewNGramTokenizer(2, 4, 0) for _, tc := range []struct { desc string input string @@ -27,10 +42,25 @@ func TestNGramTokenizer(t *testing.T) { input: "a", exp: []Token{}, }, + { + desc: "two chars", + input: "ab", + exp: []Token{{Key: []byte("ab")}}, + }, + { + desc: "three chars", + input: "abc", + exp: []Token{{Key: []byte("ab")}, {Key: []byte("bc")}, {Key: []byte("abc")}}, + }, { desc: "four chars", input: "abcd", - exp: []Token{{Key: []byte("abc"), Value: "abc"}}, + exp: []Token{{Key: []byte("ab")}, {Key: []byte("bc")}, {Key: []byte("abc")}, {Key: []byte("cd")}, {Key: []byte("bcd")}}, + }, + { + desc: "foo", + input: "日本語", + exp: []Token{{Key: []byte("日本")}, {Key: []byte("本語")}, {Key: []byte("日本語")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -39,7 +69,50 @@ func TestNGramTokenizer(t *testing.T) { } } -func Test3Gram0SkipTokenizer(t *testing.T) { +func TestNGramsSkip(t *testing.T) { + + for _, tc := range []struct { + desc string + tokenizer *NgramTokenizer + input string + exp []Token + }{ + { + desc: "four chars", + tokenizer: twoSkipOne, + input: "abcd", + exp: []Token{{Key: []byte("ab")}, {Key: []byte("cd")}}, + }, + { + desc: "special chars", + tokenizer: twoSkipOne, + input: "日本語", + exp: []Token{{Key: []byte("日本")}}, + }, + { + desc: "multi", + tokenizer: NewNGramTokenizer(2, 4, 1), + input: "abcdefghij", + exp: []Token{ + {Key: []byte("ab")}, + {Key: []byte("abc")}, + {Key: []byte("cd")}, + {Key: []byte("cde")}, + {Key: []byte("ef")}, + {Key: []byte("efg")}, + {Key: []byte("gh")}, + {Key: []byte("ghi")}, + {Key: []byte("ij")}, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + require.Equal(t, tc.exp, tc.tokenizer.Tokens(tc.input)) + }) + } +} + +func Test3GramSkip0Tokenizer(t *testing.T) { tokenizer := three for _, tc := range []struct { desc string @@ -59,12 +132,12 @@ func Test3Gram0SkipTokenizer(t *testing.T) { { desc: "three char", input: "abc", - exp: []Token{{Key: []byte("abc"), Value: "abc"}}, + exp: []Token{{Key: []byte("abc")}}, }, { desc: "four chars", input: "abcd", - exp: []Token{{Key: []byte("abc"), Value: "abc"}, {Key: []byte("bcd"), Value: "bcd"}}, + exp: []Token{{Key: []byte("abc")}, {Key: []byte("bcd")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -73,7 +146,7 @@ func Test3Gram0SkipTokenizer(t *testing.T) { } } -func Test3Gram1SkipTokenizer(t *testing.T) { +func Test3GramSkip1Tokenizer(t *testing.T) { tokenizer := threeSkip1 
for _, tc := range []struct { desc string @@ -93,17 +166,46 @@ func Test3Gram1SkipTokenizer(t *testing.T) { { desc: "three char", input: "abc", - exp: []Token{{Key: []byte("abc"), Value: "abc"}}, + exp: []Token{{Key: []byte("abc")}}, }, { desc: "four chars", input: "abcd", - exp: []Token{{Key: []byte("abc"), Value: "abc"}}, + exp: []Token{{Key: []byte("abc")}}, }, { desc: "five chars", input: "abcde", - exp: []Token{{Key: []byte("abc"), Value: "abc"}, {Key: []byte("cde"), Value: "cde"}}, + exp: []Token{{Key: []byte("abc")}, {Key: []byte("cde")}}, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) + }) + } +} + +func Test3GramSkip2Tokenizer(t *testing.T) { + tokenizer := threeSkip2 + for _, tc := range []struct { + desc string + input string + exp []Token + }{ + { + desc: "empty", + input: "", + exp: []Token{}, + }, + { + desc: "single char", + input: "a", + exp: []Token{}, + }, + { + desc: "four chars", + input: "abcd", + exp: []Token{{Key: []byte("abc")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -112,7 +214,7 @@ func Test3Gram1SkipTokenizer(t *testing.T) { } } -func Test4Gram0SkipTokenizer(t *testing.T) { +func Test4GramSkip0Tokenizer(t *testing.T) { tokenizer := four for _, tc := range []struct { desc string @@ -137,12 +239,12 @@ func Test4Gram0SkipTokenizer(t *testing.T) { { desc: "four chars", input: "abcd", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}}, + exp: []Token{{Key: []byte("abcd")}}, }, { desc: "five chars", input: "abcde", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("bcde"), Value: "bcde"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("bcde")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -151,7 +253,7 @@ func Test4Gram0SkipTokenizer(t *testing.T) { } } -func Test4Gram1SkipTokenizer(t *testing.T) { +func Test4GramSkip1Tokenizer(t *testing.T) { tokenizer := fourSkip1 for _, tc := range []struct { desc string @@ -176,27 +278,27 @@ func Test4Gram1SkipTokenizer(t *testing.T) { { desc: "four chars", input: "abcd", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}}, + exp: []Token{{Key: []byte("abcd")}}, }, { desc: "five chars", input: "abcde", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}}, + exp: []Token{{Key: []byte("abcd")}}, }, { desc: "six chars", input: "abcdef", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("cdef"), Value: "cdef"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}}, }, { desc: "seven chars", input: "abcdefg", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("cdef"), Value: "cdef"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}}, }, { desc: "eight chars", input: "abcdefgh", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("cdef"), Value: "cdef"}, {Key: []byte("efgh"), Value: "efgh"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}, {Key: []byte("efgh")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -205,7 +307,7 @@ func Test4Gram1SkipTokenizer(t *testing.T) { } } -func Test4Gram2SkipTokenizer(t *testing.T) { +func Test4GramSkip2Tokenizer(t *testing.T) { tokenizer := fourSkip2 for _, tc := range []struct { desc string @@ -230,37 +332,37 @@ func Test4Gram2SkipTokenizer(t *testing.T) { { desc: "four chars", input: "abcd", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}}, + exp: []Token{{Key: []byte("abcd")}}, }, { desc: "five chars", input: "abcde", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}}, + exp: []Token{{Key: []byte("abcd")}}, }, { desc: "six 
chars", input: "abcdef", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}}, + exp: []Token{{Key: []byte("abcd")}}, }, { desc: "seven chars", input: "abcdefg", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("defg"), Value: "defg"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}}, }, { desc: "eight chars", input: "abcdefgh", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("defg"), Value: "defg"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}}, }, { desc: "nine chars", input: "abcdefghi", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("defg"), Value: "defg"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}}, }, { desc: "ten chars", input: "abcdefghij", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("defg"), Value: "defg"}, {Key: []byte("ghij"), Value: "ghij"}}, + exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}, {Key: []byte("ghij")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -269,7 +371,7 @@ func Test4Gram2SkipTokenizer(t *testing.T) { } } -func Test5Gram0SkipTokenizer(t *testing.T) { +func Test5GramSkip0Tokenizer(t *testing.T) { tokenizer := five for _, tc := range []struct { desc string @@ -299,12 +401,12 @@ func Test5Gram0SkipTokenizer(t *testing.T) { { desc: "five chars", input: "abcde", - exp: []Token{{Key: []byte("abcde"), Value: "abcde"}}, + exp: []Token{{Key: []byte("abcde")}}, }, { desc: "six chars", input: "abcdef", - exp: []Token{{Key: []byte("abcde"), Value: "abcde"}, {Key: []byte("bcdef"), Value: "bcdef"}}, + exp: []Token{{Key: []byte("abcde")}, {Key: []byte("bcdef")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -313,7 +415,7 @@ func Test5Gram0SkipTokenizer(t *testing.T) { } } -func Test6Gram0SkipTokenizer(t *testing.T) { +func Test6GramSkip0Tokenizer(t *testing.T) { tokenizer := six for _, tc := range []struct { desc string @@ -348,12 +450,12 @@ func Test6Gram0SkipTokenizer(t *testing.T) { { desc: "six chars", input: "abcdef", - exp: []Token{{Key: []byte("abcdef"), Value: "abcdef"}}, + exp: []Token{{Key: []byte("abcdef")}}, }, { desc: "seven chars", input: "abcdefg", - exp: []Token{{Key: []byte("abcdef"), Value: "abcdef"}, {Key: []byte("bcdefg"), Value: "bcdefg"}}, + exp: []Token{{Key: []byte("abcdef")}, {Key: []byte("bcdefg")}}, }, } { t.Run(tc.desc, func(t *testing.T) { @@ -369,13 +471,10 @@ func makeBuf(from, through, checksum int) []byte { binary.PutVarint(i64buf, int64(from)) p = append(p, i64buf...) - p = append(p, 58) binary.PutVarint(i64buf, int64(through)) p = append(p, i64buf...) - p = append(p, 58) binary.LittleEndian.PutUint32(i32buf, uint32(checksum)) p = append(p, i32buf...) 
- p = append(p, 58) return p } @@ -400,46 +499,43 @@ func TestWrappedTokenizer(t *testing.T) { desc: "four chars", input: "abcd", exp: []Token{ - {Key: append(makeBuf(0, 999999, 1), []byte("abc")...), Value: string(makeBuf(0, 999999, 1)) + "abc"}, - {Key: []byte("abc"), Value: "abc"}}, + {Key: append(makeBuf(0, 999999, 1), []byte("abc")...)}, + {Key: []byte("abc")}}, }, { desc: "uuid", input: "2b1a5e46-36a2-4694-a4b1-f34cc7bdfc45", exp: []Token{ - {Key: append(makeBuf(0, 999999, 1), []byte("2b1")...), Value: string(makeBuf(0, 999999, 1)) + "2b1"}, - {Key: append(makeBuf(0, 999999, 1), []byte("a5e")...), Value: string(makeBuf(0, 999999, 1)) + "a5e"}, - {Key: append(makeBuf(0, 999999, 1), []byte("46-")...), Value: string(makeBuf(0, 999999, 1)) + "46-"}, - {Key: append(makeBuf(0, 999999, 1), []byte("36a")...), Value: string(makeBuf(0, 999999, 1)) + "36a"}, - {Key: append(makeBuf(0, 999999, 1), []byte("2-4")...), Value: string(makeBuf(0, 999999, 1)) + "2-4"}, - {Key: append(makeBuf(0, 999999, 1), []byte("694")...), Value: string(makeBuf(0, 999999, 1)) + "694"}, - {Key: append(makeBuf(0, 999999, 1), []byte("-a4")...), Value: string(makeBuf(0, 999999, 1)) + "-a4"}, - {Key: append(makeBuf(0, 999999, 1), []byte("b1-")...), Value: string(makeBuf(0, 999999, 1)) + "b1-"}, - {Key: append(makeBuf(0, 999999, 1), []byte("f34")...), Value: string(makeBuf(0, 999999, 1)) + "f34"}, - {Key: append(makeBuf(0, 999999, 1), []byte("cc7")...), Value: string(makeBuf(0, 999999, 1)) + "cc7"}, - {Key: append(makeBuf(0, 999999, 1), []byte("bdf")...), Value: string(makeBuf(0, 999999, 1)) + "bdf"}, - {Key: append(makeBuf(0, 999999, 1), []byte("c45")...), Value: string(makeBuf(0, 999999, 1)) + "c45"}, - {Key: []byte("2b1"), Value: "2b1"}, - {Key: []byte("a5e"), Value: "a5e"}, - {Key: []byte("46-"), Value: "46-"}, - {Key: []byte("36a"), Value: "36a"}, - {Key: []byte("2-4"), Value: "2-4"}, - {Key: []byte("694"), Value: "694"}, - {Key: []byte("-a4"), Value: "-a4"}, - {Key: []byte("b1-"), Value: "b1-"}, - {Key: []byte("f34"), Value: "f34"}, - {Key: []byte("cc7"), Value: "cc7"}, - {Key: []byte("bdf"), Value: "bdf"}, - {Key: []byte("c45"), Value: "c45"}, + {Key: append(makeBuf(0, 999999, 1), []byte("2b1")...)}, + {Key: []byte("2b1")}, + {Key: append(makeBuf(0, 999999, 1), []byte("a5e")...)}, + {Key: []byte("a5e")}, + {Key: append(makeBuf(0, 999999, 1), []byte("46-")...)}, + {Key: []byte("46-")}, + {Key: append(makeBuf(0, 999999, 1), []byte("36a")...)}, + {Key: []byte("36a")}, + {Key: append(makeBuf(0, 999999, 1), []byte("2-4")...)}, + {Key: []byte("2-4")}, + {Key: append(makeBuf(0, 999999, 1), []byte("694")...)}, + {Key: []byte("694")}, + {Key: append(makeBuf(0, 999999, 1), []byte("-a4")...)}, + {Key: []byte("-a4")}, + {Key: append(makeBuf(0, 999999, 1), []byte("b1-")...)}, + {Key: []byte("b1-")}, + {Key: append(makeBuf(0, 999999, 1), []byte("f34")...)}, + {Key: []byte("f34")}, + {Key: append(makeBuf(0, 999999, 1), []byte("cc7")...)}, + {Key: []byte("cc7")}, + {Key: append(makeBuf(0, 999999, 1), []byte("bdf")...)}, + {Key: []byte("bdf")}, + {Key: append(makeBuf(0, 999999, 1), []byte("c45")...)}, + {Key: []byte("c45")}, }, }, } { t.Run(tc.desc, func(t *testing.T) { - chunkTokenizer := ChunkIDTokenizer(logproto.ChunkRef{Fingerprint: 1, - From: 0, - Through: 999999, - Checksum: 1, - }, tokenizer) + chunkTokenizer := ChunkIDTokenizer(tokenizer) + chunkTokenizer.Reinit(logproto.ChunkRef{From: 0, Through: 999999, Checksum: 1}) require.Equal(t, tc.exp, chunkTokenizer.Tokens(tc.input)) }) } @@ -448,7 +544,7 @@ func 
TestWrappedTokenizer(t *testing.T) { func BenchmarkTokens(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - file, _ := os.Open("big.txt") + file, _ := os.Open(BigFile) defer file.Close() scanner := bufio.NewScanner(file) @@ -460,17 +556,19 @@ func BenchmarkTokens(b *testing.B) { } } -func BenchmarkOldTokens(b *testing.B) { +func BenchmarkWrappedTokens(b *testing.B) { + chunkTokenizer := ChunkIDTokenizer(three) + chunkTokenizer.Reinit(logproto.ChunkRef{From: 0, Through: 999999, Checksum: 1}) for i := 0; i < b.N; i++ { b.StopTimer() - file, _ := os.Open("big.txt") + file, _ := os.Open(BigFile) defer file.Close() scanner := bufio.NewScanner(file) b.StartTimer() for scanner.Scan() { line := scanner.Text() - _ = three.OldTokens(line) + _ = chunkTokenizer.Tokens(line) } } } diff --git a/tools/tsdb/bloom-tester/lib.go b/tools/tsdb/bloom-tester/lib.go index 7ea76fa522301..edc804cad5fe4 100644 --- a/tools/tsdb/bloom-tester/lib.go +++ b/tools/tsdb/bloom-tester/lib.go @@ -10,7 +10,6 @@ import ( "github.com/grafana/loki/pkg/storage/bloom/v1/filter" tsdbindex "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index" - //"github.com/grafana/loki/pkg/storage/stores/tsdb/index" "hash/fnv" "math" "os" @@ -26,8 +25,8 @@ import ( "github.com/grafana/loki/pkg/chunkenc" "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/logql/log" "github.com/grafana/loki/pkg/storage" + bt "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/config" @@ -89,15 +88,15 @@ func execute() { } var ( - three = newNGramTokenizer(3, 4, 0) - threeSkip1 = newNGramTokenizer(3, 4, 1) - threeSkip2 = newNGramTokenizer(3, 4, 2) - threeSkip3 = newNGramTokenizer(3, 4, 3) - four = newNGramTokenizer(4, 5, 0) - fourSkip1 = newNGramTokenizer(4, 5, 1) - fourSkip2 = newNGramTokenizer(4, 5, 2) - five = newNGramTokenizer(5, 6, 0) - six = newNGramTokenizer(6, 7, 0) + three = bt.NewNGramTokenizer(3, 4, 0) + threeSkip1 = bt.NewNGramTokenizer(3, 4, 1) + threeSkip2 = bt.NewNGramTokenizer(3, 4, 2) + threeSkip3 = bt.NewNGramTokenizer(3, 4, 3) + four = bt.NewNGramTokenizer(4, 5, 0) + fourSkip1 = bt.NewNGramTokenizer(4, 5, 1) + fourSkip2 = bt.NewNGramTokenizer(4, 5, 2) + five = bt.NewNGramTokenizer(5, 6, 0) + six = bt.NewNGramTokenizer(6, 7, 0) onePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.01, 0.8) } fivePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.05, 0.8) } @@ -120,26 +119,26 @@ var experiments = []Experiment{ true, onePctError, ), - - NewExperiment( - "token=4skip1_error=1%_indexchunks=true", - fourSkip1, - true, - onePctError, - ), /* + NewExperiment( + "token=4skip1_error=1%_indexchunks=true", + fourSkip1, + true, + onePctError, + ), + + NewExperiment( + "token=4skip2_error=1%_indexchunks=true", + fourSkip2, + true, + onePctError, + ), NewExperiment( - "token=4skip2_error=1%_indexchunks=true", - fourSkip2, + "token=4skip0_error=5%_indexchunks=true", + four, true, - onePctError, + fivePctError, ),*/ - NewExperiment( - "token=4skip0_error=5%_indexchunks=true", - four, - true, - fivePctError, - ), /* NewExperiment( "token=4skip1_error=5%_indexchunks=true", @@ -266,11 +265,10 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS } level.Info(util_log.Logger).Log("msg", "starting 
analyze()", "tester", testerNumber, "total", numTesters) - var n int // count iterated series - reportEvery := 10 // report every n chunks + var n int // count iterated series //pool := newPool(runtime.NumCPU()) //pool := newPool(1) - + bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer) for _, tenant := range tenants { level.Info(util_log.Logger).Log("Analyzing tenant", tenant, "table", tableName) err := indexShipper.ForEach( @@ -308,8 +306,6 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS return } - cache := NewLRUCache4(150000) - transformed := make([]chunk.Chunk, 0, len(chks)) for _, chk := range chks { transformed = append(transformed, chunk.Chunk{ @@ -333,11 +329,10 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS for _, c := range got { chunkTotalUncompressedSize += c.Data.(*chunkenc.Facade).LokiChunk().UncompressedSize() } - metrics.chunkSize.Observe(float64(chunkTotalUncompressedSize)) n += len(got) // iterate experiments - for experimentIdx, experiment := range experiments { + for _, experiment := range experiments { bucketPrefix := os.Getenv("BUCKET_PREFIX") if strings.EqualFold(bucketPrefix, "") { bucketPrefix = "named-experiments-" @@ -348,63 +343,13 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS tenant, ls.String(), objectClient) { + bloomTokenizer.SetLineTokenizer(experiment.tokenizer) level.Info(util_log.Logger).Log("Starting work on: ", ls.String(), "'", FNV32a(ls.String()), "'", experiment.name, tenant) startTime := time.Now().UnixMilli() sbf := experiment.bloom() - cache.Clear() - - // Iterate chunks - var ( - lines, inserts, collisions float64 - ) - for cidx := range got { - chunkTokenizer := ChunkIDTokenizer(got[cidx].ChunkRef, experiment.tokenizer) - - var tokenizer Tokenizer = chunkTokenizer - if !experiment.encodeChunkID { - tokenizer = experiment.tokenizer // so I don't have to change the lines of code below - } - lc := got[cidx].Data.(*chunkenc.Facade).LokiChunk() - - // Only report on the last experiment since they run serially - if experimentIdx == len(experiments)-1 && (n+cidx+1)%reportEvery == 0 { - estimatedProgress := float64(fp) / float64(model.Fingerprint(math.MaxUint64)) * 100. 
- level.Info(util_log.Logger).Log( - "msg", "iterated", - "progress", fmt.Sprintf("%.2f%%", estimatedProgress), - "chunks", len(chks), - "series", ls.String(), - ) - } - - itr, err := lc.Iterator( - context.Background(), - time.Unix(0, 0), - time.Unix(0, math.MaxInt64), - logproto.FORWARD, - log.NewNoopPipeline().ForStream(ls), - ) - helpers.ExitErr("getting iterator", err) - - for itr.Next() && itr.Error() == nil { - toks := tokenizer.Tokens(itr.Entry().Line) - lines++ - for _, tok := range toks { - if tok.Key != nil { - if !cache.GetString(tok.Value) { - cache.PutStringByte(tok.Value, tok.Key) - if dup := sbf.TestAndAdd(tok.Key); dup { - collisions++ - } - inserts++ - } - } - } - } - helpers.ExitErr("iterating chunks", itr.Error()) - } // for each chunk + bloomTokenizer.PopulateSBF(sbf, got) endTime := time.Now().UnixMilli() if len(got) > 0 { @@ -414,9 +359,6 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS metrics.estimatedCount.WithLabelValues(experiment.name).Observe( float64(estimatedCount(sbf.Capacity(), sbf.FillRatio())), ) - metrics.lines.WithLabelValues(experiment.name).Add(lines) - metrics.inserts.WithLabelValues(experiment.name).Add(inserts) - metrics.collisions.WithLabelValues(experiment.name).Add(collisions) writeSBF(sbf, os.Getenv("DIR"), @@ -428,6 +370,7 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS metrics.sbfCreationTime.WithLabelValues(experiment.name).Add(float64(endTime - startTime)) metrics.sbfsCreated.WithLabelValues(experiment.name).Inc() + metrics.chunkSize.Observe(float64(chunkTotalUncompressedSize)) if err != nil { helpers.ExitErr("writing sbf to file", err) diff --git a/tools/tsdb/bloom-tester/lib_test.go b/tools/tsdb/bloom-tester/lib_test.go index 68460897c53ff..419ff44f59007 100644 --- a/tools/tsdb/bloom-tester/lib_test.go +++ b/tools/tsdb/bloom-tester/lib_test.go @@ -3,262 +3,151 @@ package main import ( "bufio" "os" - "strconv" "testing" - - "github.com/stretchr/testify/require" ) -func TestNGrams(t *testing.T) { - tokenizer := newNGramTokenizer(2, 4, 0) - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "two chars", - input: "ab", - exp: []Token{{Key: []byte("ab"), Value: "ab"}}, - }, - { - desc: "three chars", - input: "abc", - exp: []Token{{Key: []byte("ab"), Value: "ab"}, {Key: []byte("bc"), Value: "bc"}, {Key: []byte("abc"), Value: "abc"}}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("ab"), Value: "ab"}, {Key: []byte("bc"), Value: "bc"}, {Key: []byte("abc"), Value: "abc"}, {Key: []byte("cd"), Value: "cd"}, {Key: []byte("bcd"), Value: "bcd"}}, - }, - { - desc: "foo", - input: "日本語", - exp: []Token{{Key: []byte("日本"), Value: "日本"}, {Key: []byte("本語"), Value: "本語"}, {Key: []byte("日本語"), Value: "日本語"}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func Test4NGrams(t *testing.T) { - tokenizer := four - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "two chars", - input: "ab", - exp: []Token{}, - }, - { - desc: "three chars", - input: "abc", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("abcd"), Value: 
"abcd"}}, - }, - { - desc: "five chars", - input: "abcde", - exp: []Token{{Key: []byte("abcd"), Value: "abcd"}, {Key: []byte("bcde"), Value: "bcde"}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func Test6NGrams(t *testing.T) { - tokenizer := six - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "two chars", - input: "ab", - exp: []Token{}, - }, - { - desc: "three chars", - input: "abc", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{}, - }, - { - desc: "five chars", - input: "abcde", - exp: []Token{}, - }, - { - desc: "six chars", - input: "abcdef", - exp: []Token{{Key: []byte("abcdef"), Value: "abcdef"}}, - }, - { - desc: "seven chars", - input: "abcdefg", - exp: []Token{{Key: []byte("abcdef"), Value: "abcdef"}, {Key: []byte("bcdefg"), Value: "bcdefg"}}, - }, - { - desc: "eight chars", - input: "abcdefgh", - exp: []Token{{Key: []byte("abcdef"), Value: "abcdef"}, {Key: []byte("bcdefg"), Value: "bcdefg"}, {Key: []byte("cdefgh"), Value: "cdefgh"}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func TestNGramsSkip(t *testing.T) { - twoSkipOne := newNGramTokenizer(2, 3, 1) - for _, tc := range []struct { - desc string - tokenizer *ngramTokenizer - input string - exp []Token - }{ - { - desc: "four chars", - tokenizer: twoSkipOne, - input: "abcd", - exp: []Token{{Key: []byte("ab"), Value: "ab"}, {Key: []byte("cd"), Value: "cd"}}, - }, - { - desc: "special chars", - tokenizer: twoSkipOne, - input: "日本語", - exp: []Token{{Key: []byte("日本"), Value: "日本"}}, - }, - { - desc: "multi", - tokenizer: newNGramTokenizer(2, 4, 1), - input: "abcdefghij", - exp: []Token{ - {Key: []byte("ab"), Value: "ab"}, - {Key: []byte("abc"), Value: "abc"}, - {Key: []byte("cd"), Value: "cd"}, - {Key: []byte("cde"), Value: "cde"}, - {Key: []byte("ef"), Value: "ef"}, - {Key: []byte("efg"), Value: "efg"}, - {Key: []byte("gh"), Value: "gh"}, - {Key: []byte("ghi"), Value: "ghi"}, - {Key: []byte("ij"), Value: "ij"}, - }, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tc.tokenizer.Tokens(tc.input)) - }) - } -} - -var num = 1000000 +const BigFile = "../../../pkg/logql/sketch/testdata/war_peace.txt" -func BenchmarkLRU1Put(b *testing.B) { - cache := NewLRUCache(num) +func BenchmarkSBFTestAndAdd(b *testing.B) { for i := 0; i < b.N; i++ { - cache.Put(strconv.Itoa(i)) + b.StopTimer() + file, _ := os.Open(BigFile) + defer file.Close() + scanner := bufio.NewScanner(file) + experiment := NewExperiment( + "token=3skip0_error=1%_indexchunks=true", + three, + true, + onePctError, + ) + sbf := experiment.bloom() + b.StartTimer() + for scanner.Scan() { + line := scanner.Text() + tokens := experiment.tokenizer.Tokens(line) + for _, token := range tokens { + sbf.TestAndAdd(token.Key) + } + } } } -func BenchmarkLRU1Get(b *testing.B) { - cache := NewLRUCache(num) - for i := 0; i < num; i++ { - cache.Put(strconv.Itoa(i)) - } - b.ResetTimer() +func BenchmarkSBFAdd(b *testing.B) { for i := 0; i < b.N; i++ { - cache.Get(strconv.Itoa(i)) + b.StopTimer() + file, _ := os.Open(BigFile) + defer file.Close() + scanner := bufio.NewScanner(file) + experiment := NewExperiment( + "token=3skip0_error=1%_indexchunks=true", + three, + true, + onePctError, + ) + sbf := experiment.bloom() + 
b.StartTimer() + for scanner.Scan() { + line := scanner.Text() + tokens := experiment.tokenizer.Tokens(line) + for _, token := range tokens { + sbf.Add(token.Key) + } + } } } -func BenchmarkLRU2Put(b *testing.B) { - cache := NewLRUCache2(num) +func BenchmarkSBFSeparateTestAndAdd(b *testing.B) { for i := 0; i < b.N; i++ { - cache.Put(strconv.Itoa(i)) + b.StopTimer() + file, _ := os.Open(BigFile) + defer file.Close() + scanner := bufio.NewScanner(file) + experiment := NewExperiment( + "token=3skip0_error=1%_indexchunks=true", + three, + true, + onePctError, + ) + sbf := experiment.bloom() + b.StartTimer() + for scanner.Scan() { + line := scanner.Text() + tokens := experiment.tokenizer.Tokens(line) + for _, token := range tokens { + found := sbf.Test(token.Key) + if !found { + sbf.Add(token.Key) + } + } + } } } -func BenchmarkLRU2Get(b *testing.B) { - cache := NewLRUCache2(num) - for i := 0; i < num; i++ { - cache.Put(strconv.Itoa(i)) - } - b.ResetTimer() +func BenchmarkSBFTestAndAddWithLRU(b *testing.B) { for i := 0; i < b.N; i++ { - cache.Get(strconv.Itoa(i)) + b.StopTimer() + file, _ := os.Open(BigFile) + defer file.Close() + scanner := bufio.NewScanner(file) + experiment := NewExperiment( + "token=3skip0_error=1%_indexchunks=true", + three, + true, + onePctError, + ) + sbf := experiment.bloom() + cache := NewLRUCache4(150000) + b.StartTimer() + for scanner.Scan() { + line := scanner.Text() + tokens := experiment.tokenizer.Tokens(line) + for _, token := range tokens { + if !cache.Get(token.Key) { + cache.Put(token.Key) + sbf.TestAndAdd(token.Key) + } + } + } } } -func BenchmarkLRU4Put(b *testing.B) { - cache := NewLRUCache4(num) +func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) { for i := 0; i < b.N; i++ { - cache.Put([]byte(strconv.Itoa(i))) - } -} + b.StopTimer() + file, _ := os.Open(BigFile) + defer file.Close() + scanner := bufio.NewScanner(file) + experiment := NewExperiment( + "token=3skip0_error=1%_indexchunks=true", + three, + true, + onePctError, + ) + sbf := experiment.bloom() + cache := NewLRUCache4(150000) + b.StartTimer() + for scanner.Scan() { + line := scanner.Text() + tokens := experiment.tokenizer.Tokens(line) + for _, token := range tokens { + if !cache.Get(token.Key) { + cache.Put(token.Key) -func BenchmarkLRU4Get(b *testing.B) { - cache := NewLRUCache4(num) - for i := 0; i < num; i++ { - cache.Put([]byte(strconv.Itoa(i))) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Get([]byte(strconv.Itoa(i))) + found := sbf.Test(token.Key) + if !found { + sbf.Add(token.Key) + } + //sbf.TestAndAdd(token.Key) + } + } + } } } -func BenchmarkSBFTestAndAdd(b *testing.B) { +func BenchmarkSBFSeparateTestAndAddWithLRU5(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - file, _ := os.Open("big.txt") + file, _ := os.Open(BigFile) defer file.Close() scanner := bufio.NewScanner(file) experiment := NewExperiment( @@ -268,21 +157,31 @@ func BenchmarkSBFTestAndAdd(b *testing.B) { onePctError, ) sbf := experiment.bloom() + cache := NewLRUCache5(150000) + b.StartTimer() for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) for _, token := range tokens { - sbf.TestAndAdd(token.Key) + str := string(token.Key) + if !cache.Get(str) { + cache.Put(str) + + found := sbf.Test(token.Key) + if !found { + sbf.Add(token.Key) + } + } } } } } -func BenchmarkSBFAdd(b *testing.B) { +func BenchmarkSBFTestAndAddWithLRU5(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - file, _ := os.Open("big.txt") + file, _ := os.Open(BigFile) defer file.Close() 
scanner := bufio.NewScanner(file) experiment := NewExperiment( @@ -292,21 +191,92 @@ func BenchmarkSBFAdd(b *testing.B) { onePctError, ) sbf := experiment.bloom() + cache := NewLRUCache5(150000) + b.StartTimer() for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) for _, token := range tokens { - sbf.Add(token.Key) + str := string(token.Key) + if !cache.Get(str) { + cache.Put(str) + + sbf.TestAndAdd(token.Key) + } } } } } -func BenchmarkSBFSeparateTestAndAdd(b *testing.B) { +func BenchmarkSBFTestAndAddWithByteKeyLRU(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + file, _ := os.Open(BigFile) + defer file.Close() + scanner := bufio.NewScanner(file) + experiment := NewExperiment( + "token=4skip0_error=1%_indexchunks=false", + four, + false, + onePctError, + ) + sbf := experiment.bloom() + cache := NewByteKeyLRUCache(150000) + b.StartTimer() + for scanner.Scan() { + line := scanner.Text() + tokens := experiment.tokenizer.Tokens(line) + for _, token := range tokens { + + array := NewFourByteKeyFromSlice(token.Key) + if !cache.Get(array) { + cache.Put(array) + sbf.TestAndAdd(token.Key) + } + + } + } + } +} + +func BenchmarkSBFTestAndAddWithFourByteKeyLRU(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - file, _ := os.Open("big.txt") + file, _ := os.Open(BigFile) + defer file.Close() + scanner := bufio.NewScanner(file) + experiment := NewExperiment( + "token=4skip0_error=1%_indexchunks=false", + four, + false, + onePctError, + ) + sbf := experiment.bloom() + cache := NewFourByteKeyLRUCache(150000) + b.StartTimer() + for scanner.Scan() { + line := scanner.Text() + tokens := experiment.tokenizer.Tokens(line) + for _, token := range tokens { + if !cache.Get([4]byte(token.Key)) { + cache.Put([4]byte(token.Key)) + found := sbf.Test(token.Key) + if !found { + sbf.Add(token.Key) + } + //sbf.TestAndAdd(token.Key) + } + + } + } + } +} + +func BenchmarkSBFAddWithLRU(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + file, _ := os.Open(BigFile) defer file.Close() scanner := bufio.NewScanner(file) experiment := NewExperiment( @@ -316,13 +286,14 @@ func BenchmarkSBFSeparateTestAndAdd(b *testing.B) { onePctError, ) sbf := experiment.bloom() + cache := NewLRUCache4(150000) b.StartTimer() for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) for _, token := range tokens { - found := sbf.Test(token.Key) - if !found { + if !cache.Get(token.Key) { + cache.Put(token.Key) sbf.Add(token.Key) } } @@ -330,10 +301,10 @@ func BenchmarkSBFSeparateTestAndAdd(b *testing.B) { } } -func BenchmarkSBFTestAndAddWithLRU(b *testing.B) { +func BenchmarkSBFSeparateTestAndAddWithLRU1(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - file, _ := os.Open("big.txt") + file, _ := os.Open(BigFile) defer file.Close() scanner := bufio.NewScanner(file) experiment := NewExperiment( @@ -343,25 +314,30 @@ func BenchmarkSBFTestAndAddWithLRU(b *testing.B) { onePctError, ) sbf := experiment.bloom() - cache := NewLRUCache4(150000) + cache := NewLRUCache(150000) b.StartTimer() for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) for _, token := range tokens { - if !cache.Get(token.Key) { - cache.Put(token.Key) - sbf.TestAndAdd(token.Key) + str := string(token.Key) + if !cache.Get(str) { + cache.Put(str) + found := sbf.Test(token.Key) + if !found { + sbf.Add(token.Key) + } + //sbf.Add(token.Key) } } } } } -func BenchmarkSBFAddWithLRU(b *testing.B) { +func BenchmarkSBFSeparateTestAndAddWithMap(b *testing.B) 
{ for i := 0; i < b.N; i++ { b.StopTimer() - file, _ := os.Open("big.txt") + file, _ := os.Open(BigFile) defer file.Close() scanner := bufio.NewScanner(file) experiment := NewExperiment( @@ -371,15 +347,27 @@ func BenchmarkSBFAddWithLRU(b *testing.B) { onePctError, ) sbf := experiment.bloom() - cache := NewLRUCache4(150000) + cache := make(map[string]interface{}, 150000) b.StartTimer() for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) for _, token := range tokens { - if !cache.Get(token.Key) { - cache.Put(token.Key) - sbf.Add(token.Key) + str := string(token.Key) + + _, found := cache[str] + if !found { + cache[str] = "" + f := sbf.Test(token.Key) + if !f { + sbf.Add(token.Key) + } + + if len(cache) > 150000 { + for elem := range cache { + delete(cache, elem) + } + } } } } diff --git a/tools/tsdb/bloom-tester/lrucache.go b/tools/tsdb/bloom-tester/lrucache.go index 160163a920e58..56caba451f1fd 100644 --- a/tools/tsdb/bloom-tester/lrucache.go +++ b/tools/tsdb/bloom-tester/lrucache.go @@ -1,6 +1,9 @@ package main -import "container/list" +import ( + "container/list" + "fmt" +) type LRUCache struct { capacity int @@ -267,3 +270,337 @@ func (c *HashSet) Clear() { delete(c.cache, k) } } + +// ByteKey is an interface for types that represent keys of a certain size. +type ByteKey interface { + Size() int + Equal(other ByteKey) bool +} + +// FourByteKey represents a key of 4 bytes. +type FourByteKey [4]byte + +// Size returns the size of the FourByteKey. +func (k FourByteKey) Size() int { + return 4 +} + +// Equal checks if two FourByteKeys are equal. +func (k FourByteKey) Equal(other ByteKey) bool { + if otherFourByteKey, ok := other.(FourByteKey); ok { + return k == otherFourByteKey + } + return false +} + +// ThirtyOneByteKey represents a key of 31 bytes. +type ThirtyOneByteKey [31]byte + +// Size returns the size of the ThirtyOneByteKey. +func (k ThirtyOneByteKey) Size() int { + return 31 +} + +// Equal checks if two ThirtyOneByteKeys are equal. 
+func (k ThirtyOneByteKey) Equal(other ByteKey) bool {
+	if otherThirtyOneByteKey, ok := other.(ThirtyOneByteKey); ok {
+		return k == otherThirtyOneByteKey
+	}
+	return false
+}
+
+type ByteKeyLRUCache struct {
+	capacity int
+	//m        map[ByteKey]struct{}
+	m    map[ByteKey]*list.Element
+	list *list.List
+}
+
+func NewByteKeyLRUCache(capacity int) *ByteKeyLRUCache {
+	return &ByteKeyLRUCache{
+		capacity: capacity,
+		m:        make(map[ByteKey]*list.Element, capacity),
+		list:     list.New(),
+	}
+}
+
+func (c *ByteKeyLRUCache) Get(key ByteKey) bool {
+	if value, ok := c.m[key]; ok {
+		// Move the accessed element to the front of the list
+		c.list.MoveToFront(value)
+		return true
+	}
+	return false
+}
+
+func (c *ByteKeyLRUCache) Put(key ByteKey) {
+	if value, ok := c.m[key]; ok {
+		// If the key already exists, move it to the front
+		c.list.MoveToFront(value)
+	} else {
+		// If the cache is full, remove the least recently used element
+		if len(c.m) >= c.capacity {
+			// Get the least recently used element from the back of the list
+			tailElem := c.list.Back()
+			if tailElem != nil {
+				deletedEntry := c.list.Remove(tailElem).(ByteKey)
+				delete(c.m, deletedEntry)
+			}
+		}
+
+		// Add the new key to the cache and the front of the list
+		elem := c.list.PushFront(key)
+		c.m[key] = elem
+	}
+}
+
+func (c *ByteKeyLRUCache) Clear() {
+	// Iterate through the list and remove all elements
+	for elem := c.list.Front(); elem != nil; elem = elem.Next() {
+		delete(c.m, elem.Value.(ByteKey))
+	}
+
+	// Clear the list
+	c.list.Init()
+}
+
+// ByteKeyMap is a map that uses ByteKey as a key.
+type ByteKeyMap struct {
+	capacity int
+	m        map[ByteKey]struct{}
+}
+
+// NewByteKeyMap creates a new ByteKeyMap.
+func NewByteKeyMap(capacity int) ByteKeyMap {
+	return ByteKeyMap{
+		capacity: capacity,
+		m:        make(map[ByteKey]struct{}, capacity),
+	}
+}
+
+// Put adds an entry to the map.
+func (bm *ByteKeyMap) Put(key ByteKey) {
+	bm.m[key] = struct{}{}
+}
+
+// Get retrieves a value from the map based on the key.
+func (bm *ByteKeyMap) Get(key ByteKey) bool {
+	_, exists := bm.m[key]
+	return exists
+}
+
+type ByteSet struct {
+	capacity int
+	cache    map[[4]byte]struct{}
+}
+
+func NewByteSet(capacity int) *ByteSet {
+	return &ByteSet{
+		capacity: capacity,
+		cache:    make(map[[4]byte]struct{}),
+	}
+}
+
+func sliceToByteArray(slice []byte) [4]byte {
+	// Define the desired size of the byte array
+	// If you want to make it dynamically sized, use len(slice)
+	var array [4]byte
+
+	// Copy elements from the slice to the array
+	copy(array[:], slice)
+
+	return array
+}
+
+// NewFourByteKeyFromSlice converts a byte slice to a FourByteKey.
+func NewFourByteKeyFromSlice(slice []byte) FourByteKey {
+	var key FourByteKey
+	copy(key[:], slice)
+	return key
+}
+
+// NewThirtyOneByteKeyFromSlice converts a byte slice to a ThirtyOneByteKey.
+func NewThirtyOneByteKeyFromSlice(slice []byte) ThirtyOneByteKey { + var key ThirtyOneByteKey + copy(key[:], slice) + return key +} + +func (c ByteSet) Get(key string) bool { + if _, ok := c.cache[sliceToByteArray([]byte(key))]; ok { + return true + } + return false +} + +func (c *ByteSet) Put(key string) { + c.cache[sliceToByteArray([]byte(key))] = struct{}{} +} + +func (c *ByteSet) PutBytes(value []byte) { + c.cache[sliceToByteArray(value)] = struct{}{} +} + +func (c *ByteSet) Clear() { + for k := range c.cache { + delete(c.cache, k) + } +} + +type FourByteKeyLRUCache struct { + capacity int + m map[[4]byte]*list.Element + list *list.List +} + +func NewFourByteKeyLRUCache(capacity int) *FourByteKeyLRUCache { + return &FourByteKeyLRUCache{ + capacity: capacity, + m: make(map[[4]byte]*list.Element, capacity), + list: list.New(), + } +} + +func (c *FourByteKeyLRUCache) Get(key [4]byte) bool { + if value, ok := c.m[key]; ok { + // Move the accessed element to the front of the list + c.list.MoveToFront(value) + return true + } + return false +} + +func (c *FourByteKeyLRUCache) Put(key [4]byte) { + if value, ok := c.m[key]; ok { + // If the key already exists, move it to the front + c.list.MoveToFront(value) + } else { + // If the cache is full, remove the least recently used element + if len(c.m) >= c.capacity { + // Get the least recently used element from the back of the list + tailElem := c.list.Back() + if tailElem != nil { + deletedEntry := c.list.Remove(tailElem).([4]byte) + delete(c.m, deletedEntry) + } + } + + // Add the new key to the cache and the front of the list + elem := c.list.PushFront(key) + c.m[key] = elem + } +} + +func (c *FourByteKeyLRUCache) Clear() { + // Iterate through the list and remove all elements + for elem := c.list.Front(); elem != nil; elem = elem.Next() { + delete(c.m, elem.Value.([4]byte)) + } + + // Clear the list + c.list.Init() +} + +type LRUCache5 struct { + capacity int + cache map[string]*LRUNode5 + head *LRUNode5 + tail *LRUNode5 +} + +type LRUNode5 struct { + key string + prev *LRUNode5 + next *LRUNode5 +} + +func NewLRUCache5(capacity int) *LRUCache5 { + return &LRUCache5{ + capacity: capacity, + } +} +func (c *LRUCache5) init() { + c.cache = make(map[string]*LRUNode5, c.capacity) + c.head = new(LRUNode5) + c.tail = new(LRUNode5) + c.head.next = c.tail + c.tail.prev = c.head +} + +func (c *LRUCache5) pop(item *LRUNode5) { + item.prev.next = item.next + item.next.prev = item.prev +} + +func (c *LRUCache5) push(item *LRUNode5) { + c.head.next.prev = item + item.next = c.head.next + item.prev = c.head + c.head.next = item +} + +func (c *LRUCache5) evict() *LRUNode5 { + item := c.tail.prev + c.pop(item) + delete(c.cache, item.key) + return item +} + +func (c *LRUCache5) Get(key string) bool { + if c.cache == nil { + c.init() + } + item := c.cache[key] + if item == nil { + return false + } + if c.head.next != item { + c.pop(item) + c.push(item) + } + return true +} + +func (c *LRUCache5) Put(key string) { + if c.cache == nil { + c.init() + } + item := c.cache[key] + if item == nil { + if len(c.cache) == c.capacity { + item = c.evict() + } else { + item = new(LRUNode5) + } + item.key = key + c.push(item) + c.cache[key] = item + } else { + if c.head.next != item { + c.pop(item) + c.push(item) + } + } +} + +func (c *LRUCache5) Clear() { + if c.cache != nil { + + for elem := range c.cache { + delete(c.cache, elem) + } + + c.head = nil + c.tail = nil + } +} + +func (c *LRUCache5) Dump() { + if c.cache != nil { + + for elem := range c.cache { + 
fmt.Println(elem) + } + + } +} diff --git a/tools/tsdb/bloom-tester/lrucache_test.go b/tools/tsdb/bloom-tester/lrucache_test.go new file mode 100644 index 0000000000000..c1125af01132c --- /dev/null +++ b/tools/tsdb/bloom-tester/lrucache_test.go @@ -0,0 +1,206 @@ +package main + +import ( + "encoding/binary" + "github.com/stretchr/testify/require" + "strconv" + "testing" +) + +var num = 1000000 + +func BenchmarkLRU1Put(b *testing.B) { + cache := NewLRUCache(num) + for i := 0; i < b.N; i++ { + cache.Put(strconv.Itoa(i)) + } +} + +func BenchmarkLRU1Get(b *testing.B) { + cache := NewLRUCache(num) + for i := 0; i < num; i++ { + cache.Put(strconv.Itoa(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Get(strconv.Itoa(i)) + } +} + +func BenchmarkLRU2Put(b *testing.B) { + cache := NewLRUCache2(num) + for i := 0; i < b.N; i++ { + cache.Put(strconv.Itoa(i)) + } +} + +func BenchmarkLRU2Get(b *testing.B) { + cache := NewLRUCache2(num) + for i := 0; i < num; i++ { + cache.Put(strconv.Itoa(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Get(strconv.Itoa(i)) + } +} + +func BenchmarkLRU4Put(b *testing.B) { + cache := NewLRUCache4(num) + for i := 0; i < b.N; i++ { + cache.Put([]byte(strconv.Itoa(i))) + } +} + +func BenchmarkLRU4Get(b *testing.B) { + cache := NewLRUCache4(num) + for i := 0; i < num; i++ { + cache.Put([]byte(strconv.Itoa(i))) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Get([]byte(strconv.Itoa(i))) + } +} + +func TestByteSet(t *testing.T) { + set := NewByteSet(30) + set.Put("fooa") + set.PutBytes([]byte("foob")) + for _, tc := range []struct { + desc string + input string + exp bool + }{ + { + desc: "test string put", + input: "fooa", + exp: true, + }, + { + desc: "test byte put", + input: "foob", + exp: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + require.Equal(t, tc.exp, set.Get(tc.input)) + }) + } +} + +func TestByteKeyLRUCache(t *testing.T) { + set := NewByteKeyLRUCache(30) + set.Put(NewFourByteKeyFromSlice([]byte("fooa"))) + //set.PutBytes([]byte("foob")) + for _, tc := range []struct { + desc string + input string + exp bool + }{ + { + desc: "test valid", + input: "fooa", + exp: true, + }, + { + desc: "test not valid", + input: "foob", + exp: false, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + require.Equal(t, tc.exp, set.Get(NewFourByteKeyFromSlice([]byte(tc.input)))) + }) + } +} + +func TestLRUCache5(t *testing.T) { + set := NewLRUCache5(30) + set.Put("fooa") + for _, tc := range []struct { + desc string + input string + exp bool + }{ + { + desc: "test valid", + input: "fooa", + exp: true, + }, + { + desc: "test not valid", + input: "foob", + exp: false, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + require.Equal(t, tc.exp, set.Get(tc.input)) + }) + } +} + +func BenchmarkLRU5Put(b *testing.B) { + cache := NewLRUCache5(num) + for i := 0; i < b.N; i++ { + cache.Put(strconv.Itoa(i)) + } +} + +func BenchmarkLRU5Get(b *testing.B) { + cache := NewLRUCache5(num) + for i := 0; i < num; i++ { + cache.Put(strconv.Itoa(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Get(strconv.Itoa(i)) + } +} + +func BenchmarkByteKeyLRUCacheSet(b *testing.B) { + buf := make([]byte, 26) + cache := NewByteKeyLRUCache(num) + for i := 0; i < b.N; i++ { + binary.LittleEndian.PutUint64(buf, uint64(i)) + + cache.Put(NewThirtyOneByteKeyFromSlice(buf)) + } +} + +func BenchmarkByteKeyLRUCacheGet(b *testing.B) { + buf := make([]byte, 26) + + cache := NewByteKeyLRUCache(num) + for i := 0; i < b.N; i++ { + binary.LittleEndian.PutUint64(buf, 
uint64(i)) + + cache.Put(NewThirtyOneByteKeyFromSlice(buf)) + //cache.Put(NewTwentySixByteKeyFromSlice([]byte(strconv.Itoa(i)))) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + binary.LittleEndian.PutUint64(buf, uint64(i)) + + cache.Get(NewThirtyOneByteKeyFromSlice(buf)) + //cache.Get(NewTwentySixByteKeyFromSlice([]byte(strconv.Itoa(i)))) + } +} + +func BenchmarkByteSetPut(b *testing.B) { + cache := NewByteSet(num) + for i := 0; i < b.N; i++ { + cache.Put(strconv.Itoa(i)) + } +} + +func BenchmarkByteSetGet(b *testing.B) { + cache := NewByteSet(num) + for i := 0; i < b.N; i++ { + cache.Put(strconv.Itoa(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Get(strconv.Itoa(i)) + } +} diff --git a/tools/tsdb/bloom-tester/metrics.go b/tools/tsdb/bloom-tester/metrics.go index 0b1d5d54995a8..c330d7edb8d23 100644 --- a/tools/tsdb/bloom-tester/metrics.go +++ b/tools/tsdb/bloom-tester/metrics.go @@ -1,6 +1,7 @@ package main import ( + bt "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/bloom/v1/filter" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -8,12 +9,12 @@ import ( type Experiment struct { name string - tokenizer Tokenizer + tokenizer bt.Tokenizer bloom func() *filter.ScalableBloomFilter encodeChunkID bool } -func NewExperiment(name string, tokenizer Tokenizer, encodeChunkID bool, bloom func() *filter.ScalableBloomFilter) Experiment { +func NewExperiment(name string, tokenizer bt.Tokenizer, encodeChunkID bool, bloom func() *filter.ScalableBloomFilter) Experiment { return Experiment{ name: name, tokenizer: tokenizer, diff --git a/tools/tsdb/bloom-tester/readlib.go b/tools/tsdb/bloom-tester/readlib.go index 4d70b034635f6..7f1dc51f933ea 100644 --- a/tools/tsdb/bloom-tester/readlib.go +++ b/tools/tsdb/bloom-tester/readlib.go @@ -9,6 +9,7 @@ import ( "github.com/grafana/loki/pkg/chunkenc" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/log" + bt "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/bloom/v1/filter" "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/config" @@ -198,15 +199,15 @@ func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexSh objectClient) for gotIdx := range got { // for every chunk for _, queryExperiment := range queryExperiments { // for each search string - if len(queryExperiment.searchString) >= experiment.tokenizer.getMin()+experiment.tokenizer.getSkip() { + if len(queryExperiment.searchString) >= experiment.tokenizer.GetMin()+experiment.tokenizer.GetSkip() { foundInChunk := false foundInSbf := false - chunkTokenizer := ChunkIDTokenizerHalfInit(experiment.tokenizer) + chunkTokenizer := bt.ChunkIDTokenizer(experiment.tokenizer) - chunkTokenizer.reinit(got[gotIdx].ChunkRef) - var tokenizer Tokenizer = chunkTokenizer + chunkTokenizer.Reinit(got[gotIdx].ChunkRef) + var tokenizer bt.Tokenizer = chunkTokenizer if !experiment.encodeChunkID { tokenizer = experiment.tokenizer } @@ -309,10 +310,10 @@ func readSBFFromObjectStorage(location, prefix, period, tenant, series string, o return sbf } -func searchSbf(sbf *filter.ScalableBloomFilter, tokenizer Tokenizer, searchString string) bool { - for i := 0; i <= tokenizer.getSkip(); i++ { +func searchSbf(sbf *filter.ScalableBloomFilter, tokenizer bt.Tokenizer, searchString string) bool { + for i := 0; i <= tokenizer.GetSkip(); 
i++ { numMatches := 0 - if (len(searchString) - i) >= tokenizer.getMin() { + if (len(searchString) - i) >= tokenizer.GetMin() { tokens := tokenizer.Tokens(searchString[i:]) for _, token := range tokens { diff --git a/tools/tsdb/bloom-tester/tokenizer.go b/tools/tsdb/bloom-tester/tokenizer.go deleted file mode 100644 index e0e5d9e5b5ff5..0000000000000 --- a/tools/tsdb/bloom-tester/tokenizer.go +++ /dev/null @@ -1,255 +0,0 @@ -package main - -import ( - "encoding/binary" - "unicode/utf8" - - "github.com/grafana/loki/pkg/logproto" -) - -type Token struct { - Key []byte - Value string -} - -type Tokenizer interface { - Tokens(line string) []Token - getSkip() int - getMin() int - getMax() int -} - -/* -type logfmtTokenizer struct { - parser *log.LogfmtParser - lbls *log.LabelsBuilder -} - -func (t *logfmtTokenizer) Tokens(line string) []Token { - t.lbls.Reset() - t.parser.Process(0, []byte(line), t.lbls) - ls := t.lbls.LabelsResult().Labels() - res := make([]Token, 0, len(ls)) - for _, l := range ls { - res = append(res, Token{Key: l.Name, Value: l.Value}) - } - return res -} - -func newLogfmtTokenizer() *logfmtTokenizer { - return &logfmtTokenizer{ - // non strict, allow empty values - parser: log.NewLogfmtParser(false, true), - lbls: log.NewBaseLabelsBuilder().ForLabels(nil, 0), - } -} - -*/ - -type ngramTokenizer struct { - // [min,max) exclusivity - min, max, skip int - buffers [][]rune // circular buffers used for ngram generation - runeBuffer []byte // buffer used for token generation - tokenBuffer []Token // buffer used for holding tokens that is returned - internalTokenBuffer []Token // circular buffer for tokens -} - -func newNGramTokenizer(min, max, skip int) *ngramTokenizer { - capacity := max - min - t := &ngramTokenizer{ - min: min, - max: max, - skip: skip, - buffers: make([][]rune, capacity), - } - for i := t.min; i < t.max; i++ { - t.buffers[i-t.min] = make([]rune, i) - } - t.runeBuffer = make([]byte, 0, max*4) - t.tokenBuffer = make([]Token, 0, 1024) - t.internalTokenBuffer = make([]Token, 0, 1024) - for i := 0; i < cap(t.internalTokenBuffer); i++ { - tok := Token{} - tok.Key = make([]byte, 0, 132) - t.internalTokenBuffer = append(t.internalTokenBuffer, tok) - } - - return t -} - -func (t *ngramTokenizer) getSkip() int { - return t.skip -} - -func (t *ngramTokenizer) getMin() int { - return t.min -} - -func (t *ngramTokenizer) getMax() int { - return t.max -} - -func (t *ngramTokenizer) Tokens(line string) []Token { - t.tokenBuffer = t.tokenBuffer[:0] // Reset the result slice - var i int // rune index (not position that is measured in the range loop) - numToks := 0 - for _, r := range line { - - // j is the index of the buffer to use - for j := 0; j < (t.max - t.min); j++ { - // n is the length of the ngram - n := j + t.min - // pos is the position in the buffer to overwrite - pos := i % n - t.buffers[j][pos] = r - - if i >= n-1 && (i+1-n)%(t.skip+1) == 0 { - t.runeBuffer = reassemble(t.buffers[j], (i+1)%n, t.runeBuffer) - //fmt.Println(numToks, cap(t.internalTokenBuffer), len(t.internalTokenBuffer)) - if numToks >= cap(t.internalTokenBuffer) || numToks == len(t.internalTokenBuffer) { - tok := Token{} - tok.Key = make([]byte, 0, 132) - t.internalTokenBuffer = append(t.internalTokenBuffer, tok) - } - //fmt.Println(numToks, cap(t.internalTokenBuffer), len(t.internalTokenBuffer)) - t.internalTokenBuffer[numToks].Key = t.internalTokenBuffer[numToks].Key[:0] - t.internalTokenBuffer[numToks].Key = append(t.internalTokenBuffer[numToks].Key, t.runeBuffer...) 
- t.internalTokenBuffer[numToks].Value = string(t.internalTokenBuffer[numToks].Key) - numToks++ - } - } - i++ - } - t.tokenBuffer = append(t.tokenBuffer, t.internalTokenBuffer[:numToks]...) - return t.tokenBuffer -} - -func (t *ngramTokenizer) OldTokens(line string) []Token { - t.tokenBuffer = t.tokenBuffer[:0] // Reset the result slice - var i int // rune index (not position that is measured in the range loop) - for _, r := range line { - - // j is the index of the buffer to use - for j := 0; j < (t.max - t.min); j++ { - // n is the length of the ngram - n := j + t.min - // pos is the position in the buffer to overwrite - pos := i % n - t.buffers[j][pos] = r - - if i >= n-1 && (i+1-n)%(t.skip+1) == 0 { - t.runeBuffer = reassemble(t.buffers[j], (i+1)%n, t.runeBuffer) - b := Token{} - b.Key = make([]byte, 0, 132) // TODO: Yeah, that's too big but I didn't fee like doing the math at the end of the day - b.Key = append(b.Key, t.runeBuffer...) - b.Value = string(b.Key) - t.tokenBuffer = append(t.tokenBuffer, b) - } - } - i++ - } - return t.tokenBuffer -} - -func reassemble(buf []rune, pos int, result []byte) []byte { - result = result[:0] // Reset the result slice - for i := 0; i < len(buf); i++ { - cur := (pos + i) % len(buf) - result = utf8.AppendRune(result, buf[cur]) - } - return result -} - -type WrappedTokenizer struct { - t Tokenizer - f func(Token) Token - tokenBuffer []Token - prefix []byte - i64buf []byte - i32buf []byte -} - -func (w *WrappedTokenizer) Tokens(line string) []Token { - w.tokenBuffer = w.tokenBuffer[:0] // Reset the result slice - toks := w.t.Tokens(line) - for _, tok := range toks { - w.tokenBuffer = append(w.tokenBuffer, w.f(tok)) - } - return append(w.tokenBuffer, toks...) -} - -func (w *WrappedTokenizer) getSkip() int { - return w.t.getSkip() -} - -func (w *WrappedTokenizer) getMin() int { - return w.t.getMin() -} - -func (w *WrappedTokenizer) getMax() int { - return w.t.getMax() -} - -func ChunkIDTokenizer(chk logproto.ChunkRef, t Tokenizer) *WrappedTokenizer { - //prefix := fmt.Sprintf("%d:%d:%d:", chk.From, chk.Through, chk.Checksum) - p := make([]byte, 0, 256) - i64buf := make([]byte, binary.MaxVarintLen64) - i32buf := make([]byte, 4) - - binary.PutVarint(i64buf, int64(chk.From)) - p = append(p, i64buf...) - p = append(p, 58) - binary.PutVarint(i64buf, int64(chk.Through)) - p = append(p, i64buf...) - p = append(p, 58) - binary.LittleEndian.PutUint32(i32buf, chk.Checksum) - p = append(p, i32buf...) - p = append(p, 58) - - return &WrappedTokenizer{ - t: t, - f: func(tok Token) Token { - tok.Key = append(append(tok.Key, p...), tok.Key...)[len(tok.Key):] - tok.Value = string(tok.Key) - return tok - }, - tokenBuffer: make([]Token, 0, 1024), - prefix: p, - i64buf: i64buf, - i32buf: i32buf, - } -} - -func ChunkIDTokenizerHalfInit(t Tokenizer) *WrappedTokenizer { - p := make([]byte, 0, 256) - return &WrappedTokenizer{ - t: t, - tokenBuffer: make([]Token, 0, 1024), - prefix: p, - i64buf: make([]byte, binary.MaxVarintLen64), - i32buf: make([]byte, 4), - } -} - -func (w *WrappedTokenizer) reinit(chk logproto.ChunkRef) { - //prefix := fmt.Sprintf("%d:%d:%d:", chk.From, chk.Through, chk.Checksum) - w.prefix = w.prefix[:0] - - //w.prefix = fmt.Appendf(w.prefix, "%d:%d:%d:", chk.From, chk.Through, chk.Checksum) - binary.PutVarint(w.i64buf, int64(chk.From)) - w.prefix = append(w.prefix, w.i64buf...) - w.prefix = append(w.prefix, 58) - binary.PutVarint(w.i64buf, int64(chk.Through)) - w.prefix = append(w.prefix, w.i64buf...) 
-	w.prefix = append(w.prefix, 58)
-	binary.LittleEndian.PutUint32(w.i32buf, chk.Checksum)
-	w.prefix = append(w.prefix, w.i32buf...)
-	w.prefix = append(w.prefix, 58)
-
-	w.f = func(tok Token) Token {
-		tok.Key = append(append(tok.Key, w.prefix...), tok.Key...)[len(tok.Key):]
-		tok.Value = string(tok.Key)
-		return tok
-	}
-}

From 1781d7c700d04ad57fbdc8cce0eb009e06d62900 Mon Sep 17 00:00:00 2001
From: Ashwanth
Date: Fri, 27 Oct 2023 11:38:26 +0530
Subject: [PATCH 32/33] ksonnet: do not deploy table manager when shipper is in-use (#11020)

**What this PR does / why we need it**:
Do not generate table manager manifests if shipper is being used.
Table manager is a deprecated target and is not recommended when using the
`tsdb` or `boltdb-shipper` indexes. Compactor is already bundled along with
shipper and handles retention and compaction for the `tsdb` and
`boltdb-shipper` indexes.

**Which issue(s) this PR fixes**:
Fixes [#10943](https://github.com/grafana/loki/issues/10943)

**Special notes for your reviewer**:

**Checklist**
- [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**)
- [ ] Documentation added
- [ ] Tests updated
- [x] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
- [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory.
---
 CHANGELOG.md                                  |  2 ++
 production/ksonnet/loki/loki.libsonnet        |  3 ++-
 .../ksonnet/loki/table-manager.libsonnet      | 20 +++++++++++++------
 3 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3e40e765342d6..879340ed05181 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -70,6 +70,8 @@
 
 #### Jsonnet
 
+* [11020](https://github.com/grafana/loki/pull/11020) **ashwanthgoli**: Loki ksonnet: Do not generate table-manager manifests if shipper store is in-use.
+
 * [10784](https://github.com/grafana/loki/pull/10894) **slim-bean** Update index gateway client to use a headless service.
 
 * [10542](https://github.com/grafana/loki/pull/10542) **chaudum**: Remove legacy deployment mode for ingester (Deployment, without WAL) and instead always run them as StatefulSet.

diff --git a/production/ksonnet/loki/loki.libsonnet b/production/ksonnet/loki/loki.libsonnet
index 199fb9e757f6d..ad0489a69cd3f 100644
--- a/production/ksonnet/loki/loki.libsonnet
+++ b/production/ksonnet/loki/loki.libsonnet
@@ -9,7 +9,6 @@
 (import 'distributor.libsonnet') +
 (import 'ingester.libsonnet') +
 (import 'querier.libsonnet') +
-(import 'table-manager.libsonnet') +
 (import 'query-frontend.libsonnet') +
 (import 'ruler.libsonnet') +
 
@@ -27,6 +26,8 @@
 // BoltDB and TSDB Shipper support. Anything that modifies the compactor must be imported after this.
(import 'shipper.libsonnet') + +(import 'table-manager.libsonnet') + + // Multi-zone ingester related config (import 'multi-zone.libsonnet') + diff --git a/production/ksonnet/loki/table-manager.libsonnet b/production/ksonnet/loki/table-manager.libsonnet index 16ffb97e3802f..df1f7338af075 100644 --- a/production/ksonnet/loki/table-manager.libsonnet +++ b/production/ksonnet/loki/table-manager.libsonnet @@ -17,7 +17,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; 'bigtable.table-cache.enabled': true, }, - table_manager_container:: + table_manager_container:: if !$._config.using_shipper_store then container.new('table-manager', $._images.tableManager) + container.withPorts($.util.defaultPorts) + container.withArgsMixin(k.util.mapToFlags($.table_manager_args)) + @@ -27,15 +27,23 @@ local k = import 'ksonnet-util/kausal.libsonnet'; container.mixin.readinessProbe.withTimeoutSeconds(1) + container.withEnvMixin($._config.commonEnvs) + k.util.resourcesRequests('100m', '100Mi') + - k.util.resourcesLimits('200m', '200Mi'), + k.util.resourcesLimits('200m', '200Mi') + else {}, local deployment = k.apps.v1.deployment, - table_manager_deployment: + table_manager_deployment: if !$._config.using_shipper_store then deployment.new('table-manager', 1, [$.table_manager_container]) + $.config_hash_mixin + - k.util.configVolumeMount('loki', '/etc/loki/config'), + k.util.configVolumeMount('loki', '/etc/loki/config') + else {}, - table_manager_service: - k.util.serviceFor($.table_manager_deployment, $._config.service_ignored_labels), + + table_manager_service: if !$._config.using_shipper_store then + k.util.serviceFor($.table_manager_deployment, $._config.service_ignored_labels) + else {}, + + _config+: { + table_manager: if !$._config.using_shipper_store then super.table_manager else null, + }, } From 2327789b5506d0ccc00d931195da17a2d47bf236 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Oct 2023 12:17:35 +0530 Subject: [PATCH 33/33] fix(deps): update github.com/grafana/gomemcache digest to 6947259 (main) (#10836) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [github.com/grafana/gomemcache](https://togithub.com/grafana/gomemcache) | require | digest | `70d78ea` -> `6947259` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/grafana/loki). 
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +- .../grafana/gomemcache/memcache/memcache.go | 42 +++++++++++++++---- vendor/modules.txt | 2 +- 4 files changed, 39 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index fd6104c1c40bb..d7dfb90347211 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 github.com/grafana/go-gelf/v2 v2.0.1 - github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 + github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 diff --git a/go.sum b/go.sum index 792b91d419e70..7bf146121d376 100644 --- a/go.sum +++ b/go.sum @@ -987,8 +987,8 @@ github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQa github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= -github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 h1:MLYY2R60/74hfYl5vRRmC2VDo0Yuql1QQ1ig8hnvgSI= -github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= diff --git a/vendor/github.com/grafana/gomemcache/memcache/memcache.go b/vendor/github.com/grafana/gomemcache/memcache/memcache.go index f6b9cfdbf4b12..67288a12fb78a 100644 --- a/vendor/github.com/grafana/gomemcache/memcache/memcache.go +++ b/vendor/github.com/grafana/gomemcache/memcache/memcache.go @@ -48,7 +48,7 @@ var ( // CompareAndSwap) failed because the condition was not satisfied. ErrNotStored = errors.New("memcache: item not stored") - // ErrServer means that a server error occurred. + // ErrServerError means that a server error occurred. ErrServerError = errors.New("memcache: server error") // ErrNoStats means that no statistics were available. @@ -175,6 +175,14 @@ type Client struct { // be set to a number higher than your peak parallel requests. MaxIdleConns int + // WriteBufferSizeBytes specifies the size of the write buffer (in bytes). The buffer + // is allocated for each connection. If <= 0, the default value of 4KB will be used. + WriteBufferSizeBytes int + + // ReadBufferSizeBytes specifies the size of the read buffer (in bytes). The buffer + // is allocated for each connection. If <= 0, the default value of 4KB will be used. 
+ ReadBufferSizeBytes int + // recentlyUsedConnsThreshold is the default grace period given to an // idle connection to consider it "recently used". Recently used connections // are never closed even if idle. @@ -402,6 +410,11 @@ func (c *Client) dial(addr net.Addr) (net.Conn, error) { } func (c *Client) getConn(addr net.Addr) (*conn, error) { + var ( + writer *bufio.Writer + reader *bufio.Reader + ) + cn, ok := c.getFreeConn(addr) if ok { cn.extendDeadline() @@ -411,17 +424,32 @@ func (c *Client) getConn(addr net.Addr) (*conn, error) { if err != nil { return nil, err } + + // Init buffered writer. + if c.WriteBufferSizeBytes > 0 { + writer = bufio.NewWriterSize(nc, c.WriteBufferSizeBytes) + } else { + writer = bufio.NewWriter(nc) + } + + // Init buffered reader. + if c.ReadBufferSizeBytes > 0 { + reader = bufio.NewReaderSize(nc, c.ReadBufferSizeBytes) + } else { + reader = bufio.NewReader(nc) + } + cn = &conn{ nc: nc, addr: addr, - rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)), + rw: bufio.NewReadWriter(reader, writer), c: c, } cn.extendDeadline() return cn, nil } -func (c *Client) onItem(item *Item, operation string, fn func(*Client, *bufio.ReadWriter, *Item) error) error { +func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { addr, err := c.selector.PickServer(item.Key) if err != nil { return err @@ -706,7 +734,7 @@ func cut(s string, sep byte) (before, after string, found bool) { // Set writes the given item, unconditionally. func (c *Client) Set(item *Item) error { - return c.onItem(item, "set", (*Client).set) + return c.onItem(item, (*Client).set) } func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { @@ -716,7 +744,7 @@ func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { // Add writes the given item, if no value already exists for its // key. ErrNotStored is returned if that condition is not met. func (c *Client) Add(item *Item) error { - return c.onItem(item, "add", (*Client).add) + return c.onItem(item, (*Client).add) } func (c *Client) add(rw *bufio.ReadWriter, item *Item) error { @@ -726,7 +754,7 @@ func (c *Client) add(rw *bufio.ReadWriter, item *Item) error { // Replace writes the given item, but only if the server *does* // already hold data for this key func (c *Client) Replace(item *Item) error { - return c.onItem(item, "replace", (*Client).replace) + return c.onItem(item, (*Client).replace) } func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error { @@ -741,7 +769,7 @@ func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error { // calls. ErrNotStored is returned if the value was evicted in between // the calls. func (c *Client) CompareAndSwap(item *Item) error { - return c.onItem(item, "cas", (*Client).cas) + return c.onItem(item, (*Client).cas) } func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error { diff --git a/vendor/modules.txt b/vendor/modules.txt index cafc9755c6684..8b26e541da528 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -900,7 +900,7 @@ github.com/grafana/dskit/user # github.com/grafana/go-gelf/v2 v2.0.1 ## explicit; go 1.17 github.com/grafana/go-gelf/v2/gelf -# github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 +# github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache # github.com/grafana/loki/pkg/push v0.0.0-20231023154132-0a7737e7c7eb => ./pkg/push
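
The digest bump above exposes two new per-connection knobs on the vendored memcache client, `WriteBufferSizeBytes` and `ReadBufferSizeBytes`; values of zero or below keep the previous behavior of bufio's 4KB default buffers. Below is a minimal sketch of how a caller might opt in, assuming the upstream module path `github.com/grafana/gomemcache/memcache`; the server address, buffer sizes, and key/value are illustrative assumptions, not values taken from this patch series.

```go
package main

import (
	"fmt"

	"github.com/grafana/gomemcache/memcache"
)

func main() {
	// Illustrative address; any reachable memcached instance works.
	client := memcache.New("127.0.0.1:11211")

	// New fields from this digest: per-connection write/read buffer sizes.
	// Values <= 0 fall back to bufio's default of 4KB per buffer.
	client.WriteBufferSizeBytes = 64 * 1024
	client.ReadBufferSizeBytes = 64 * 1024

	if err := client.Set(&memcache.Item{Key: "example", Value: []byte("payload")}); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	item, err := client.Get("example")
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Printf("%s => %s\n", item.Key, item.Value)
}
```

Larger buffers reduce syscall churn when items run large, at the cost of extra memory held per pooled connection, which is presumably why the change keeps the 4KB default when the fields are unset.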