diff --git a/.chloggen/codeboten_rm-unmaintained-component.yaml b/.chloggen/codeboten_rm-unmaintained-component.yaml deleted file mode 100644 index e3dac9bff4dc..000000000000 --- a/.chloggen/codeboten_rm-unmaintained-component.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Use this changelog template to create an entry for release notes. - -# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: breaking - -# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) -component: googlecloudspannerreceiver - -# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Removing unmaintained component - -# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [35724] - -# (Optional) One or more lines of additional information to render under the primary note. -# These lines will be padded with 2 spaces and then inserted directly into the document. -# Use pipe (|) for multiline entries. -subtext: - -# If your change doesn't affect end users or the exported elements of any package, -# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. -# Optional: The change log or logs in which this entry should be included. -# e.g. '[user]' or '[user, api]' -# Include 'user' if the change is relevant to end users. -# Include 'api' if there is a change to a library API. 
-# Default: '[user]' -change_logs: [] diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 39b693a84be9..937095d51077 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -243,6 +243,7 @@ receiver/fluentforwardreceiver/ @open-teleme receiver/githubreceiver/ @open-telemetry/collector-contrib-approvers @adrielp @andrzej-stencel @crobert-1 @TylerHelmuth receiver/googlecloudmonitoringreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @TylerHelmuth @abhishek-at-cloudwerx receiver/googlecloudpubsubreceiver/ @open-telemetry/collector-contrib-approvers @alexvanboxel +receiver/googlecloudspannerreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @dsimil @KiranmayiB @harishbohara11 receiver/haproxyreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @MovieStoreGuy receiver/hostmetricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @braydonk receiver/httpcheckreceiver/ @open-telemetry/collector-contrib-approvers @codeboten diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 0433fefe27b2..9002805c08d8 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -237,6 +237,7 @@ body: - receiver/github - receiver/googlecloudmonitoring - receiver/googlecloudpubsub + - receiver/googlecloudspanner - receiver/haproxy - receiver/hostmetrics - receiver/httpcheck diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index ae3465dfba4a..0a0d49a2e740 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -231,6 +231,7 @@ body: - receiver/github - receiver/googlecloudmonitoring - receiver/googlecloudpubsub + - receiver/googlecloudspanner - receiver/haproxy - receiver/hostmetrics - receiver/httpcheck diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index 183a5e514639..0a02f8039eab 100644 --- 
a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -231,6 +231,7 @@ body: - receiver/github - receiver/googlecloudmonitoring - receiver/googlecloudpubsub + - receiver/googlecloudspanner - receiver/haproxy - receiver/hostmetrics - receiver/httpcheck diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml b/.github/ISSUE_TEMPLATE/unmaintained.yaml index 2ecfeb2eccd4..defe573a3302 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -236,6 +236,7 @@ body: - receiver/github - receiver/googlecloudmonitoring - receiver/googlecloudpubsub + - receiver/googlecloudspanner - receiver/haproxy - receiver/hostmetrics - receiver/httpcheck diff --git a/cmd/githubgen/allowlist.txt b/cmd/githubgen/allowlist.txt index 8f167c094a33..7a8183addc22 100644 --- a/cmd/githubgen/allowlist.txt +++ b/cmd/githubgen/allowlist.txt @@ -25,4 +25,7 @@ abhishek-at-cloudwerx joker-star-l michael-burt Hemansh31 -shazlehu \ No newline at end of file +shazlehu +dsimil +KiranmayiB +harishbohara11 \ No newline at end of file diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index 0b8d3d6b9650..597334242c90 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -158,6 +158,7 @@ receivers: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/githubreceiver v0.112.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudmonitoringreceiver v0.112.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver v0.112.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver v0.112.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/haproxyreceiver v0.112.0 - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.112.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/httpcheckreceiver v0.112.0 @@ -413,6 +414,7 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/httpforwarderextension => ../../extension/httpforwarderextension - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter => ../../exporter/elasticsearchexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => ../../exporter/awscloudwatchlogsexporter + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver => ../../receiver/googlecloudspannerreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => ../../receiver/prometheusreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter => ../../exporter/sapmexporter - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet => ../../internal/kubelet diff --git a/receiver/googlecloudspannerreceiver/Makefile b/receiver/googlecloudspannerreceiver/Makefile new file mode 100644 index 000000000000..c1496226e590 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common \ No newline at end of file diff --git a/receiver/googlecloudspannerreceiver/README.md b/receiver/googlecloudspannerreceiver/README.md new file mode 100644 index 000000000000..97f01653fda7 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/README.md @@ -0,0 +1,83 @@ +# Google Cloud Spanner Receiver + + +| Status | | +| ------------- |-----------| +| Stability | [beta]: metrics | +| Distributions | [contrib] | +| Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fgooglecloudspanner%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fgooglecloudspanner) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fgooglecloudspanner%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fgooglecloudspanner) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@dashpole](https://www.github.com/dashpole), [@dsimil](https://www.github.com/dsimil), [@KiranmayiB](https://www.github.com/KiranmayiB), [@harishbohara11](https://www.github.com/harishbohara11) | +| Emeritus | [@architjugran](https://www.github.com/architjugran), [@varunraiko](https://www.github.com/varunraiko) | + +[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib + + +Google Cloud Spanner enable you to investigate issues with your database +by exposing via [Total and Top N built-in tables](https://cloud.google.com/spanner/docs/introspection): +- Query statistics +- Read statistics +- Transaction statistics +- Lock statistics +- and others + +_Note_: Total and Top N built-in tables are used with 1 minute statistics granularity. + +The ultimate goal of Google Cloud Spanner Receiver is to collect and transform those statistics into metrics +that would be convenient for further analysis by users. 
+ +## Getting Started + +The following configuration example is: + +```yaml +receivers: + googlecloudspanner: + collection_interval: 60s + initial_delay: 1s + top_metrics_query_max_rows: 100 + backfill_enabled: true + cardinality_total_limit: 200000 + hide_topn_lockstats_rowrangestartkey: false + truncate_text: false + projects: + - project_id: "spanner project 1" + service_account_key: "path to spanner project 1 service account json key" + instances: + - instance_id: "id1" + databases: + - "db11" + - "db12" + - instance_id: "id2" + databases: + - "db21" + - "db22" + - project_id: "spanner project 2" + service_account_key: "path to spanner project 2 service account json key" + instances: + - instance_id: "id3" + databases: + - "db31" + - "db32" + - instance_id: "id4" + databases: + - "db41" + - "db42" +``` + +Brief description of configuration properties: +- **googlecloudspanner** - name of the Cloud Spanner Receiver related section in OpenTelemetry collector configuration file +- **collection_interval** - this receiver runs periodically. Each time it runs, it queries Google Cloud Spanner, creates metrics, and sends them to the next consumer (default: 1 minute). **It is not recommended to change the default value of collection interval, since new values for metrics in the Spanner database appear only once a minute.** +- **initial_delay** defines how long this receiver waits before starting. +- **top_metrics_query_max_rows** - max number of rows to fetch from Top N built-in table(100 by default) +- **backfill_enabled** - turn on/off 1-hour data backfill(by default it is turned off) +- **cardinality_total_limit** - limit of active series per 24 hours period. If specified, turns on cardinality filtering and handling. If zero or not specified, cardinality is not handled. You can read [this document](cardinality.md) for more information about cardinality handling and filtering. 
+- **hide_topn_lockstats_rowrangestartkey** - if true, masks PII (key values) in row_range_start_key label for the "top minute lock stats" metric +- **truncate_text** - if true, the query text is truncated to 1024 characters. +- **projects** - list of GCP projects + - **project_id** - identifier of GCP project + - **service_account_key** - path to service account JSON key It is highly recommended to set this property to the correct value. In case it is empty, the [Application Default Credentials](https://google.aip.dev/auth/4110) will be used for the database connection. + - **instances** - list of Google Cloud Spanner instance for connection + - **instance_id** - identifier of Google Cloud Spanner instance + - **databases** - list of databases used from this instance + diff --git a/receiver/googlecloudspannerreceiver/cardinality.md b/receiver/googlecloudspannerreceiver/cardinality.md new file mode 100644 index 000000000000..4df7193e9fff --- /dev/null +++ b/receiver/googlecloudspannerreceiver/cardinality.md @@ -0,0 +1,80 @@ +# Cloud Spanner: Controlling Metrics Cardinality for Open Telemetry Receiver + +## Overview +This document describes the algorithm used to control the cardinality of the Top N metrics exported by Cloud Spanner receiver in OpenTelemetry collector. +With cardinality limits enforced in Cloud Monitoring and possibly in Prometheus, it is critical to control the cardinality of the metrics to avoid drops in time series which can limit the usability of Cloud Spanner custom metrics. + +## Background +Cloud Monitoring has 200,000 active time series (streams) [limits](https://cloud.google.com/monitoring/quotas) for custom metrics per project. +Active time series translates to streams that are sent in the last 24 hours. +This means that the number of streams that can be sent by OpenTelemetry collector should be controlled to this limit as the metrics will be dropped by Cloud Monitoring. 
+ +## Calculation +For simplicity purpose and for explaining of calculation, which is currently done in implementation, lets assume that we have 1 project, 1 instance and 1 database. +Such calculations are done when the collector starts. If the cardinality total limit is different(for calculations below we assume it is 200,000) the numbers will be different. + +According to the metrics [metadata configuration](internal/metadataconfig/metrics.yaml)(at the moment of writing this document) we have: +- 30 low cardinality metrics(amount of total N + active queries summary metrics); +- 26 high cardinality metrics(amount of top N metrics). + +For low cardinality metrics, the allowed amount of time series is **projects x instances x databases x low cardinality metrics** and in our case is **1 x 1 x 1 x 30 = 30** (1 per each metric). + +Thus, the remaining quota of allowed time series in 24 hours is **200,000 - 30 = 199.970**. + +For high cardinality metrics we have **199970 / 26 = 7691** allowed time series per metric. + +This means each metric converted from these columns should not have a cardinality of 7691 per day. + +We have one time series per minute, meaning that we can have up to 1,440 (24 hours x 60 minutes per hour = 1440) values per day. + +Taking into account 1440 time series we'll obtain **7691 / 1440 = 5** new time series per minute. + + +## Labels +For each metric in the Total N table, the labels that create a unique time series are **Project ID + Instance ID + Database ID**. + +For each metric in the Top N table, the labels that create a unique time series are **Project ID + Instance ID + Database ID + one of Query/Txn/Read/RowKey**. + +While fingerprint and truncated field are part of some Top N table, it does not add to cardinality as there is 1 to 1 correspondence with Query/Txn/Read to their fingerprint and truncated information. 
+ +Cardinality is the number of unique sets of labels that are sent to Cloud Monitoring to create a new time series over a 24-hour period. + +## Detailed Design +To avoid confusion between labels and queries/txn shapes/read shapes and row key, queries will be loosely used in the below design to make it easier for understanding the design. + +Also, the **7691** time series limit is used as an example. +This needs to be made configurable based on the number of databases, number of metrics per table and future growth of the metrics per table. + +With **7691** time series limit and each metric is sent once per minute, there are **60 minute x 24 hours = 1440** time series data will be sent to Cloud Monitoring. + +To allow new queries (new Top queries which are not seen in the last 24 hours) to be sent to Cloud Monitoring, each minute should have an opportunity to send new queries. +Assuming a uniform distribution of new queries being created in Top N tables, the algorithm should handle **floor(7691/1440) = 5** new queries per minute. +These 5 new queries should be the Top 5 queries which were not exported in the last 24 hours. + +While the existing time series (queries which have been exported in the last 24 hours) should be good to export, exporting more than 100(**top_metrics_query_max_rows** receiver config parameter) queries per minute may be an overkill. +Customers can tune the maximum number of queries per minute per metric to a smaller value if required. + +An LRU cache with TTL of 24 hours is maintained per each Top N metric. +For each collection interval(1 minute), cache will be populated with no more than 5 new queries, (i.e) if cache does not have an entry for the query, it will be added to the cache. +This can be done no more than 5 times every time. 
+This means that after the first minute, collector can send 5 queries, second minute collector can send 10 queries(5 new and 5 existing queries sent in the first minute which is also seen in top 100 for second minute), third minute collector can send 15 queries(5 new and 10 existing queries sent in the first two minute which is also seen in top 100 for third minute). +In a normal state after 20 minutes, the collector will be sending about 100 of queries per minute if the top 95 queries are already in the cache and 5 new queries(if it is in top 100 queries) are added to the cache in that interval. + +Since LRU cache is in memory of collector instance - you'll have separate cache and limits per instance-metric. +Ability to use external caching for this is currently out of scope. +In case of multiple collector instances used you need to remember that handling overall total cardinality limit for all metrics will be user responsibility and needs to be properly defined in receiver configuration. +Also, there is ability to turn on/off cardinality handling using receiver configuration(it is turned off by default). + +## Limitation +While this algorithm is pessimistic, it provides the guarantee that new 5 queries in top 100 queries will be seen all the time during normal operation without losing information. +If there are 6 new queries which are seen in a minute, the top 5 new queries will be sent and the 6th one will not be sent in that minute. +Next minute may capture that query if that query is still in the top 5 new query. + +To alleviate this limitation of 5 queries at the start of the collector, the receiver can collect the last 20 minutes of Top Queries(like replay) and populate the LRU cache. +This will allow the receiver to send 100 queries at the first minute. +Backfilling will also populate the cache with the last one hour of queries which can avoid 5 query limitations and still maintain 5 new query per minute going forward. 
+ +## Alternatives Considered +One option that was considered is to allow aggressive collection where the top 100 will always be sent till the limit is reached. +This means if every minute has new top 100 queries(worst case), new queries after the limit of 7691 cannot be sent. The limit can be reached in 76 minutes (7691/100 new queries per minute = 76 minutes). +Though it is impractical to see 100 new queries all the time in top 100 queries, it is quite possible that the limit can be hit easily if the number of time series per metric is limited to 2500 queries per day to allow for more databases to be supported or if the queries are using literals instead of parameters. diff --git a/receiver/googlecloudspannerreceiver/config.go b/receiver/googlecloudspannerreceiver/config.go new file mode 100644 index 000000000000..ae88f0b00c9c --- /dev/null +++ b/receiver/googlecloudspannerreceiver/config.go @@ -0,0 +1,104 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlecloudspannerreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver" + +import ( + "errors" + "fmt" + + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +const ( + minCollectionIntervalSeconds = 60 + maxTopMetricsQueryMaxRows = 100 +) + +type Config struct { + scraperhelper.ControllerConfig `mapstructure:",squash"` + + TopMetricsQueryMaxRows int `mapstructure:"top_metrics_query_max_rows"` + BackfillEnabled bool `mapstructure:"backfill_enabled"` + CardinalityTotalLimit int `mapstructure:"cardinality_total_limit"` + Projects []Project `mapstructure:"projects"` + HideTopnLockstatsRowrangestartkey bool `mapstructure:"hide_topn_lockstats_rowrangestartkey"` + TruncateText bool `mapstructure:"truncate_text"` +} + +type Project struct { + ID string `mapstructure:"project_id"` + ServiceAccountKey string `mapstructure:"service_account_key"` + Instances []Instance `mapstructure:"instances"` +} + 
+type Instance struct { + ID string `mapstructure:"instance_id"` + Databases []string `mapstructure:"databases"` +} + +func (config *Config) Validate() error { + if config.CollectionInterval.Seconds() < minCollectionIntervalSeconds { + return fmt.Errorf("\"collection_interval\" must be not lower than %v seconds, current value is %v seconds", minCollectionIntervalSeconds, config.CollectionInterval.Seconds()) + } + + if config.TopMetricsQueryMaxRows <= 0 { + return fmt.Errorf("\"top_metrics_query_max_rows\" must be positive: %v", config.TopMetricsQueryMaxRows) + } + + if config.TopMetricsQueryMaxRows > maxTopMetricsQueryMaxRows { + return fmt.Errorf("\"top_metrics_query_max_rows\" must be not greater than %v, current value is %v", maxTopMetricsQueryMaxRows, config.TopMetricsQueryMaxRows) + } + + if config.CardinalityTotalLimit < 0 { + return fmt.Errorf("\"cardinality_total_limit\" must be not negative, current value is %v", config.CardinalityTotalLimit) + } + + if len(config.Projects) == 0 { + return errors.New("missing required field \"projects\" or its value is empty") + } + + for _, project := range config.Projects { + if err := project.Validate(); err != nil { + return err + } + } + + return nil +} + +func (project Project) Validate() error { + if project.ID == "" { + return errors.New(`field "project_id" is required and cannot be empty for project configuration`) + } + + if len(project.Instances) == 0 { + return errors.New("field \"instances\" is required and cannot be empty for project configuration") + } + + for _, instance := range project.Instances { + if err := instance.Validate(); err != nil { + return err + } + } + + return nil +} + +func (instance Instance) Validate() error { + if instance.ID == "" { + return errors.New("field \"instance_id\" is required and cannot be empty for instance configuration") + } + + if len(instance.Databases) == 0 { + return errors.New("field \"databases\" is required and cannot be empty for instance configuration") + } + + 
for _, database := range instance.Databases { + if database == "" { + return errors.New("field \"databases\" contains empty database names") + } + } + + return nil +} diff --git a/receiver/googlecloudspannerreceiver/config_test.go b/receiver/googlecloudspannerreceiver/config_test.go new file mode 100644 index 000000000000..fb7670b941a4 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/config_test.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlecloudspannerreceiver + +import ( + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + cardinalityLimit = 200_000 +) + +func TestLoadConfig(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String()) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) + + assert.Equal(t, + &Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: 120 * time.Second, + InitialDelay: time.Second, + }, + TopMetricsQueryMaxRows: 10, + BackfillEnabled: true, + CardinalityTotalLimit: 200000, + HideTopnLockstatsRowrangestartkey: true, + TruncateText: true, + Projects: []Project{ + { + ID: "spanner project 1", + ServiceAccountKey: "path to spanner project 1 service account json key", + Instances: []Instance{ + { + ID: "id1", + Databases: []string{"db11", "db12"}, + }, + { + ID: "id2", + Databases: []string{"db21", "db22"}, + }, + }, + }, + { + ID: "spanner project 2", + 
ServiceAccountKey: "path to spanner project 2 service account json key", + Instances: []Instance{ + { + ID: "id3", + Databases: []string{"db31", "db32"}, + }, + { + ID: "id4", + Databases: []string{"db41", "db42"}, + }, + }, + }, + }, + }, + cfg, + ) +} + +func TestValidateInstance(t *testing.T) { + testCases := map[string]struct { + id string + databases []string + requireError bool + }{ + "All required fields are populated": {"id", []string{"name"}, false}, + "No id": {"", []string{"name"}, true}, + "No databases": {"id", nil, true}, + "Databases have empty names": {"id", []string{""}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + instance := Instance{ + ID: testCase.id, + Databases: testCase.databases, + } + + err := instance.Validate() + + if testCase.requireError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateProject(t *testing.T) { + instance := Instance{ + ID: "id", + Databases: []string{"name"}, + } + + testCases := map[string]struct { + id string + serviceAccountKey string + instances []Instance + requireError bool + }{ + "All required fields are populated": {"id", "key", []Instance{instance}, false}, + "No id": {"", "key", []Instance{instance}, true}, + "No service account key": {"id", "", []Instance{instance}, false}, + "No instances": {"id", "key", nil, true}, + "Invalid instance in instances": {"id", "key", []Instance{{}}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + project := Project{ + ID: testCase.id, + ServiceAccountKey: testCase.serviceAccountKey, + Instances: testCase.instances, + } + + err := project.Validate() + + if testCase.requireError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateConfig(t *testing.T) { + instance := Instance{ + ID: "id", + Databases: []string{"name"}, + } + + project := Project{ + ID: "id", + ServiceAccountKey: "key", + 
Instances: []Instance{instance}, + } + + testCases := map[string]struct { + collectionInterval time.Duration + topMetricsQueryMaxRows int + cardinalityOverallLimit int + projects []Project + requireError bool + }{ + "All required fields are populated": {defaultCollectionInterval, defaultTopMetricsQueryMaxRows, cardinalityLimit, []Project{project}, false}, + "Invalid collection interval": {-1, defaultTopMetricsQueryMaxRows, cardinalityLimit, []Project{project}, true}, + "Invalid top metrics query max rows": {defaultCollectionInterval, -1, cardinalityLimit, []Project{project}, true}, + "Top metrics query max rows greater than max allowed": {defaultCollectionInterval, defaultTopMetricsQueryMaxRows + 1, cardinalityLimit, []Project{project}, true}, + "No projects": {defaultCollectionInterval, defaultTopMetricsQueryMaxRows, cardinalityLimit, nil, true}, + "Invalid project in projects": {defaultCollectionInterval, defaultTopMetricsQueryMaxRows, cardinalityLimit, []Project{{}}, true}, + "Cardinality overall limit is zero": {defaultCollectionInterval, defaultTopMetricsQueryMaxRows, 0, []Project{project}, false}, + "Cardinality overall limit is negative": {defaultCollectionInterval, defaultTopMetricsQueryMaxRows, -cardinalityLimit, []Project{project}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + cfg := &Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: testCase.collectionInterval, + }, + TopMetricsQueryMaxRows: testCase.topMetricsQueryMaxRows, + CardinalityTotalLimit: testCase.cardinalityOverallLimit, + Projects: testCase.projects, + } + + err := component.ValidateConfig(cfg) + + if testCase.requireError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/doc.go b/receiver/googlecloudspannerreceiver/doc.go new file mode 100644 index 000000000000..e1cc67419c2c --- /dev/null +++ b/receiver/googlecloudspannerreceiver/doc.go 
@@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package googlecloudspannerreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver" diff --git a/receiver/googlecloudspannerreceiver/factory.go b/receiver/googlecloudspannerreceiver/factory.go new file mode 100644 index 000000000000..60b6c5af5b42 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/factory.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlecloudspannerreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver" + +import ( + "context" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + defaultCollectionInterval = 60 * time.Second + defaultTopMetricsQueryMaxRows = 100 + defaultBackfillEnabled = false + defaultHideTopnLockstatsRowrangestartkey = false + defaultTruncateText = false +) + +func NewFactory() receiver.Factory { + return receiver.NewFactory( + metadata.Type, + createDefaultConfig, + receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability)) +} + +func createDefaultConfig() component.Config { + return &Config{ + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + TopMetricsQueryMaxRows: defaultTopMetricsQueryMaxRows, + BackfillEnabled: defaultBackfillEnabled, + HideTopnLockstatsRowrangestartkey: defaultHideTopnLockstatsRowrangestartkey, + TruncateText: defaultTruncateText, + } +} + +func createMetricsReceiver( + _ context.Context, + settings receiver.Settings, + baseCfg component.Config, + consumer 
consumer.Metrics, +) (receiver.Metrics, error) { + + rCfg := baseCfg.(*Config) + r := newGoogleCloudSpannerReceiver(settings.Logger, rCfg) + + scraper, err := scraperhelper.NewScraper(metadata.Type, r.Scrape, scraperhelper.WithStart(r.Start), + scraperhelper.WithShutdown(r.Shutdown)) + if err != nil { + return nil, err + } + + return scraperhelper.NewScraperControllerReceiver(&rCfg.ControllerConfig, settings, consumer, + scraperhelper.AddScraper(scraper)) +} diff --git a/receiver/googlecloudspannerreceiver/factory_test.go b/receiver/googlecloudspannerreceiver/factory_test.go new file mode 100644 index 000000000000..fa0610164217 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/factory_test.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlecloudspannerreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/receivertest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") +} + +func TestType(t *testing.T) { + factory := NewFactory() + assert.Equal(t, metadata.Type, factory.Type()) +} + +func TestCreateMetrics(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + receiverConfig := cfg.(*Config) + + receiver, err := factory.CreateMetrics( + context.Background(), + receivertest.NewNopSettings(), + receiverConfig, + consumertest.NewNop(), + ) + + assert.NoError(t, err) + assert.NotNil(t, receiver, "failed to create metrics receiver") +} diff --git a/receiver/googlecloudspannerreceiver/generated_component_test.go b/receiver/googlecloudspannerreceiver/generated_component_test.go new file mode 
100644 index 000000000000..e49589d1b428 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/generated_component_test.go @@ -0,0 +1,69 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package googlecloudspannerreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestComponentFactoryType(t *testing.T) { + require.Equal(t, "googlecloudspanner", NewFactory().Type().String()) +} + +func TestComponentConfigStruct(t *testing.T) { + require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) +} + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + name string + createFn func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) + }{ + + { + name: "metrics", + createFn: func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) { + return factory.CreateMetrics(ctx, set, cfg, consumertest.NewNop()) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(&cfg)) + + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + host 
:= componenttest.NewNopHost() + require.NoError(t, err) + require.NoError(t, firstRcvr.Start(context.Background(), host)) + require.NoError(t, firstRcvr.Shutdown(context.Background())) + secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + require.NoError(t, secondRcvr.Start(context.Background(), host)) + require.NoError(t, secondRcvr.Shutdown(context.Background())) + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/generated_package_test.go b/receiver/googlecloudspannerreceiver/generated_package_test.go new file mode 100644 index 000000000000..10ef04b6d1de --- /dev/null +++ b/receiver/googlecloudspannerreceiver/generated_package_test.go @@ -0,0 +1,13 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package googlecloudspannerreceiver + +import ( + "os" + "testing" +) + +func TestMain(m *testing.M) { + // skipping goleak test as per metadata.yml configuration + os.Exit(m.Run()) +} diff --git a/receiver/googlecloudspannerreceiver/go.mod b/receiver/googlecloudspannerreceiver/go.mod new file mode 100644 index 000000000000..1f6a5e03872e --- /dev/null +++ b/receiver/googlecloudspannerreceiver/go.mod @@ -0,0 +1,95 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver + +go 1.22.0 + +require ( + cloud.google.com/go/spanner v1.70.0 + github.com/ReneKroon/ttlcache/v2 v2.11.0 + github.com/mitchellh/hashstructure v1.1.0 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.112.0 + go.opentelemetry.io/collector/confmap v1.18.0 + go.opentelemetry.io/collector/consumer v0.112.0 + go.opentelemetry.io/collector/consumer/consumertest v0.112.0 + go.opentelemetry.io/collector/pdata v1.18.0 + go.opentelemetry.io/collector/receiver v0.112.0 + go.uber.org/goleak v1.3.0 + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.27.0 + google.golang.org/api v0.201.0 + google.golang.org/grpc v1.67.1 + 
gopkg.in/yaml.v3 v3.0.1 +) + +require ( + cel.dev/expr v0.16.0 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.9.8 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect + github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 
// indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.112.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.112.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.112.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.112.0 // indirect + go.opentelemetry.io/collector/pipeline v0.112.0 // indirect + go.opentelemetry.io/collector/receiver/receiverprofiles v0.112.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/protobuf v1.35.1 // 
indirect +) + +retract ( + v0.76.2 + v0.76.1 + v0.65.0 +) diff --git a/receiver/googlecloudspannerreceiver/go.sum b/receiver/googlecloudspannerreceiver/go.sum new file mode 100644 index 000000000000..e31f38e8a54a --- /dev/null +++ b/receiver/googlecloudspannerreceiver/go.sum @@ -0,0 +1,1691 @@ +cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod 
h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod 
h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= 
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= 
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= +cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= 
+cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery 
v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= 
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute 
v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= 
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod 
h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= 
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow 
v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod 
h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= 
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod 
h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod 
h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod 
h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod 
h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod 
h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod 
h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= 
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= 
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler 
v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= 
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod 
h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/spanner v1.70.0 h1:nj6p/GJTgMDiSQ1gQ034ItsKuJgHiMOjtOlONOg8PSo= +cloud.google.com/go/spanner v1.70.0/go.mod h1:X5T0XftydYp0K1adeJQDJtdWpbrOeJ7wHecM4tK6FiE= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= 
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod 
h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod 
h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod 
h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 h1:oVLqHXhnYtUwM89y9T1fXGaK9wTkXHgNp8/ZNMQzUxE= +github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/ReneKroon/ttlcache/v2 v2.11.0 h1:OvlcYFYi941SBN3v9dsDcC2N8vRxyHcCmJb3Vl4QMoM= +github.com/ReneKroon/ttlcache/v2 v2.11.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod 
h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test 
v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod 
h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane 
v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= 
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= 
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 
v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/knadh/koanf/maps v0.1.1 
h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 
v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= +github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod 
h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= 
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector/component v0.112.0 
h1:Hw125Tdb427yKkzFx3U/OsfPATYXsbURkc27dn19he8= +go.opentelemetry.io/collector/component v0.112.0/go.mod h1:hV9PEgkNlVAySX+Oo/g7+NcLe234L04kRXw6uGj3VEw= +go.opentelemetry.io/collector/config/configtelemetry v0.112.0 h1:MVBrWJUoqfKrORI38dY8OV0i5d1RRHR/ACIBu9TOcZ8= +go.opentelemetry.io/collector/config/configtelemetry v0.112.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc= +go.opentelemetry.io/collector/confmap v1.18.0 h1:UEOeJY8RW8lZ1O4lzHSGqolS7uzkpXQi5fa8SidKqQg= +go.opentelemetry.io/collector/confmap v1.18.0/go.mod h1:GgNu1ElPGmLn9govqIfjaopvdspw4PJ9KeDtWC4E2Q4= +go.opentelemetry.io/collector/consumer v0.112.0 h1:tfO4FpuQ8MsD7AxgslC3tRNVYjd9Xkus34BOExsG4fM= +go.opentelemetry.io/collector/consumer v0.112.0/go.mod h1:ZKSeGvXvaofIlvPrWlARKQpONOmuw6R/yifgYCWHKRw= +go.opentelemetry.io/collector/consumer/consumererror v0.112.0 h1:dCqWEi3Yws5V5oGhCSOwxCHK6tYya5UzfzXmSLMHZ8E= +go.opentelemetry.io/collector/consumer/consumererror v0.112.0/go.mod h1:X9RJt5caDnwxoG++GhQHvlmDi2TMWEr6S/XRhZTSmOI= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.112.0 h1:ym+QxemlbWwfMSUto1hRTfcZeYbj2q8FpMzjk8O+X60= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.112.0/go.mod h1:4PjDUpURFh85R6NLEHrEf/uZjpk4LAYmmOrqu+iZsyE= +go.opentelemetry.io/collector/consumer/consumertest v0.112.0 h1:pGvNH+H4rMygUOql6ynVQim6UFdimTiJ0HRfQL6v0GE= +go.opentelemetry.io/collector/consumer/consumertest v0.112.0/go.mod h1:rfVo0tYt/BaLWw3IaQKVQafjUlMsA5qTkvsSOfFrr9c= +go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg= +go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= +go.opentelemetry.io/collector/pdata/pprofile v0.112.0 h1:t+LYorcMqZ3sDz5/jp3xU2l5lIhIXuIOOGO4Ef9CG2c= +go.opentelemetry.io/collector/pdata/pprofile v0.112.0/go.mod h1:F2aTCoDzIaxEUK1g92LZvMwradySFMo3ZsAnBIpOdUg= +go.opentelemetry.io/collector/pdata/testdata v0.112.0 h1:7jJzNvRE+CpYrwHbAYwPiN9a/hqmVRlRADJNeDJTvYI= 
+go.opentelemetry.io/collector/pdata/testdata v0.112.0/go.mod h1:9kO148Qp12B93SSUE52s0QGGV8Nf9RFN2G/PnZx3l+w= +go.opentelemetry.io/collector/pipeline v0.112.0 h1:jqKDdb8k53OLPibvxzX6fmMec0ZHAtqe4p2+cuHclEI= +go.opentelemetry.io/collector/pipeline v0.112.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg= +go.opentelemetry.io/collector/receiver v0.112.0 h1:gdTBDOPGKMZlZghtN5A7ZLNlNwCHWYcoJQeIiXvyGEQ= +go.opentelemetry.io/collector/receiver v0.112.0/go.mod h1:3QmfSUiyFzRTnHUqF8fyEvQpU5q/xuwS43jGt8JXEEA= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.112.0 h1:SShkZsWRsFss3iWZa9JwMC7h4gD5RbWDhUcz1/9dXSs= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.112.0/go.mod h1:615smszDXiz4YWwXslxlAjX7FzOVDU7Bk6xARFk+zpk= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod 
h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp 
v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod 
h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= 
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.26.0 
h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= 
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api 
v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod 
h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod 
h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= +google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod 
h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto 
v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= 
+google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 h1:nFS3IivktIU5Mk6KQa+v6RKkHUpdQpphqGNLxqNnbEk= +google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:tEzYTYZxbmVNOu0OAFH9HzdJtLn6h4Aj89zzlBCdHms= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc 
v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= 
+modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod 
h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/receiver/googlecloudspannerreceiver/internal/datasource/database.go b/receiver/googlecloudspannerreceiver/internal/datasource/database.go new file mode 100644 index 000000000000..9888e8c60ba4 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/datasource/database.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package datasource // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + +import ( + "context" + + "cloud.google.com/go/spanner" + "google.golang.org/api/option" +) + +type Database struct { + client *spanner.Client + databaseID *DatabaseID +} + +func (database *Database) Client() *spanner.Client { + return database.client +} + +func (database *Database) DatabaseID() *DatabaseID { + return database.databaseID +} + +func NewDatabase(ctx context.Context, databaseID *DatabaseID, credentialsFilePath string) (*Database, error) { + var client *spanner.Client + var err error + + if credentialsFilePath != "" { + credentialsFileClientOption := option.WithCredentialsFile(credentialsFilePath) + client, err = spanner.NewClient(ctx, databaseID.ID(), credentialsFileClientOption) + } else { + // Fallback to Application Default 
Credentials(https://google.aip.dev/auth/4110) + client, err = spanner.NewClient(ctx, databaseID.ID()) + } + + if err != nil { + return nil, err + } + + return NewDatabaseFromClient(client, databaseID), nil +} + +func NewDatabaseFromClient(client *spanner.Client, databaseID *DatabaseID) *Database { + return &Database{ + client: client, + databaseID: databaseID, + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/datasource/database_test.go b/receiver/googlecloudspannerreceiver/internal/datasource/database_test.go new file mode 100644 index 000000000000..2cd2bc44ffa8 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/datasource/database_test.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package datasource + +import ( + "context" + "testing" + + "cloud.google.com/go/spanner" + "github.com/stretchr/testify/assert" +) + +func TestNewDatabaseFromClient(t *testing.T) { + ctx := context.Background() + client, _ := spanner.NewClient(ctx, "") + databaseID := databaseID() + + database := NewDatabaseFromClient(client, databaseID) + + assert.Equal(t, client, database.Client()) + assert.Equal(t, databaseID, database.DatabaseID()) +} + +func TestNewDatabase(t *testing.T) { + ctx := context.Background() + databaseID := databaseID() + + database, err := NewDatabase(ctx, databaseID, "../../testdata/serviceAccount.json") + + assert.NoError(t, err) + assert.NotNil(t, database.Client()) + assert.Equal(t, databaseID, database.DatabaseID()) +} + +func TestNewDatabaseWithError(t *testing.T) { + ctx := context.Background() + databaseID := databaseID() + + database, err := NewDatabase(ctx, databaseID, "does not exist") + + assert.Error(t, err) + assert.Nil(t, database) +} + +func TestNewDatabaseWithNoCredentialsFilePath(t *testing.T) { + ctx := context.Background() + databaseID := databaseID() + + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "../../testdata/serviceAccount.json") + + database, err := 
NewDatabase(ctx, databaseID, "") + + assert.NoError(t, err) + assert.NotNil(t, database.Client()) + assert.Equal(t, databaseID, database.DatabaseID()) +} diff --git a/receiver/googlecloudspannerreceiver/internal/datasource/databaseid.go b/receiver/googlecloudspannerreceiver/internal/datasource/databaseid.go new file mode 100644 index 000000000000..07e9f787c51b --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/datasource/databaseid.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package datasource // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + +import "fmt" + +type DatabaseID struct { + projectID string + instanceID string + databaseName string + id string +} + +func NewDatabaseID(projectID string, instanceID string, databaseName string) *DatabaseID { + return &DatabaseID{ + projectID: projectID, + instanceID: instanceID, + databaseName: databaseName, + id: fmt.Sprintf("projects/%v/instances/%v/databases/%v", projectID, instanceID, databaseName), + } +} + +func (databaseID *DatabaseID) ProjectID() string { + return databaseID.projectID +} + +func (databaseID *DatabaseID) InstanceID() string { + return databaseID.instanceID +} + +func (databaseID *DatabaseID) DatabaseName() string { + return databaseID.databaseName +} +func (databaseID *DatabaseID) ID() string { + return databaseID.id +} diff --git a/receiver/googlecloudspannerreceiver/internal/datasource/databaseid_test.go b/receiver/googlecloudspannerreceiver/internal/datasource/databaseid_test.go new file mode 100644 index 000000000000..513a79f6316e --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/datasource/databaseid_test.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package datasource + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + projectID = 
"projectID" + instanceID = "instanceID" + databaseName = "DatabaseName" +) + +func databaseID() *DatabaseID { + return NewDatabaseID(projectID, instanceID, databaseName) +} + +func TestNewDatabaseId(t *testing.T) { + databaseID := databaseID() + + assert.Equal(t, projectID, databaseID.projectID) + assert.Equal(t, instanceID, databaseID.instanceID) + assert.Equal(t, databaseName, databaseID.databaseName) + assert.Equal(t, "projects/"+projectID+"/instances/"+instanceID+"/databases/"+databaseName, databaseID.id) + assert.Equal(t, projectID, databaseID.ProjectID()) + assert.Equal(t, instanceID, databaseID.InstanceID()) + assert.Equal(t, databaseName, databaseID.DatabaseName()) + assert.Equal(t, "projects/"+projectID+"/instances/"+instanceID+"/databases/"+databaseName, databaseID.ID()) +} diff --git a/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality.go b/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality.go new file mode 100644 index 000000000000..dcafa414f783 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filter // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" + +import ( + "errors" + "fmt" + "sort" + "time" + + "github.com/ReneKroon/ttlcache/v2" + "go.uber.org/zap" +) + +type Item struct { + SeriesKey string + Timestamp time.Time +} + +type ItemFilter interface { + Filter(source []*Item) ([]*Item, error) + Shutdown() error + TotalLimit() int + LimitByTimestamp() int +} + +type ItemFilterResolver interface { + Resolve(metricFullName string) (ItemFilter, error) + Shutdown() error +} + +type itemCardinalityFilter struct { + metricName string + totalLimit int + limitByTimestamp int + itemActivityPeriod time.Duration + logger *zap.Logger + cache *ttlcache.Cache +} + +type currentLimitByTimestamp 
struct { + limitByTimestamp int +} + +func (f *currentLimitByTimestamp) dec() { + f.limitByTimestamp-- +} + +func (f *currentLimitByTimestamp) get() int { + return f.limitByTimestamp +} + +func NewItemCardinalityFilter(metricName string, totalLimit int, limitByTimestamp int, + itemActivityPeriod time.Duration, logger *zap.Logger) (ItemFilter, error) { + if limitByTimestamp > totalLimit { + return nil, fmt.Errorf("total limit %q is lower or equal to limit by timestamp %q", totalLimit, limitByTimestamp) + } + + cache := ttlcache.NewCache() + + cache.SetCacheSizeLimit(totalLimit) + cache.SkipTTLExtensionOnHit(true) + + return &itemCardinalityFilter{ + metricName: metricName, + totalLimit: totalLimit, + limitByTimestamp: limitByTimestamp, + itemActivityPeriod: itemActivityPeriod, + logger: logger, + cache: cache, + }, nil +} + +func (f *itemCardinalityFilter) TotalLimit() int { + return f.totalLimit +} + +func (f *itemCardinalityFilter) LimitByTimestamp() int { + return f.limitByTimestamp +} + +func (f *itemCardinalityFilter) Filter(sourceItems []*Item) ([]*Item, error) { + var filteredItems []*Item + groupedItems := groupByTimestamp(sourceItems) + sortedItemKeys := sortedKeys(groupedItems) + + for _, key := range sortedItemKeys { + filteredGroupedItems, err := f.filterItems(groupedItems[key]) + if err != nil { + return nil, err + } + + filteredItems = append(filteredItems, filteredGroupedItems...) 
+ } + + return filteredItems, nil +} + +func (f *itemCardinalityFilter) filterItems(items []*Item) ([]*Item, error) { + limit := currentLimitByTimestamp{ + limitByTimestamp: f.limitByTimestamp, + } + + var filteredItems []*Item + for _, item := range items { + if included, err := f.includeItem(item, &limit); err != nil { + return nil, err + } else if included { + filteredItems = append(filteredItems, item) + } + } + + return filteredItems, nil +} + +func (f *itemCardinalityFilter) includeItem(item *Item, limit *currentLimitByTimestamp) (bool, error) { + if _, err := f.cache.Get(item.SeriesKey); err == nil { + return true, nil + } else if !errors.Is(err, ttlcache.ErrNotFound) { + return false, err + } + + if !f.canIncludeNewItem(limit.get()) { + f.logger.Debug("Skip item", zap.String("seriesKey", item.SeriesKey), zap.Time("timestamp", item.Timestamp)) + return false, nil + } + + if err := f.cache.SetWithTTL(item.SeriesKey, struct{}{}, f.itemActivityPeriod); err != nil { + if errors.Is(err, ttlcache.ErrClosed) { + err = fmt.Errorf("set item from cache failed for metric %q because cache has been already closed: %w", f.metricName, err) + } + return false, err + } + + f.logger.Debug("Added item to cache", zap.String("seriesKey", item.SeriesKey), zap.Time("timestamp", item.Timestamp)) + + limit.dec() + + return true, nil +} + +func (f *itemCardinalityFilter) canIncludeNewItem(currentLimitByTimestamp int) bool { + return f.cache.Count() < f.totalLimit && currentLimitByTimestamp > 0 +} + +func (f *itemCardinalityFilter) Shutdown() error { + return f.cache.Close() +} + +func groupByTimestamp(items []*Item) map[time.Time][]*Item { + groupedItems := make(map[time.Time][]*Item) + + for _, item := range items { + groupedItems[item.Timestamp] = append(groupedItems[item.Timestamp], item) + } + + return groupedItems +} + +func sortedKeys(groupedItems map[time.Time][]*Item) []time.Time { + keysForSorting := make([]time.Time, len(groupedItems)) + + i := 0 + for key := range 
groupedItems { + keysForSorting[i] = key + i++ + } + + sort.Slice(keysForSorting, func(i, j int) bool { return keysForSorting[i].Before(keysForSorting[j]) }) + + return keysForSorting +} diff --git a/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality_test.go b/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality_test.go new file mode 100644 index 000000000000..13c257804715 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality_test.go @@ -0,0 +1,338 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filter + +import ( + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" +) + +func TestNewItemCardinalityFilter(t *testing.T) { + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + totalLimit int + limitByTimestamp int + expectError bool + }{ + "Happy path": {2, 1, false}, + "Overall limit is lower than limit by timestamp": {1, 2, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + filter, err := NewItemCardinalityFilter(metricName, testCase.totalLimit, testCase.limitByTimestamp, + itemActivityPeriod, logger) + + if testCase.expectError { + require.Nil(t, filter) + require.Error(t, err) + } else { + require.NoError(t, err) + filterCasted := filter.(*itemCardinalityFilter) + defer executeShutdown(t, filterCasted) + + assert.Equal(t, metricName, filterCasted.metricName) + assert.Equal(t, testCase.totalLimit, filterCasted.totalLimit) + assert.Equal(t, testCase.limitByTimestamp, filterCasted.limitByTimestamp) + assert.Equal(t, itemActivityPeriod, filterCasted.itemActivityPeriod) + assert.Equal(t, logger, filterCasted.logger) + require.NotNil(t, filterCasted.cache) + } + }) + } +} + +func TestItemCardinalityFilter_TotalLimit(t *testing.T) { + itemFilter := &itemCardinalityFilter{totalLimit: totalLimit} + + 
assert.Equal(t, totalLimit, itemFilter.TotalLimit()) +} + +func TestItemCardinalityFilter_LimitByTimestamp(t *testing.T) { + itemFilter := &itemCardinalityFilter{limitByTimestamp: limitByTimestamp} + + assert.Equal(t, limitByTimestamp, itemFilter.LimitByTimestamp()) +} + +func TestItemCardinalityFilter_CanIncludeNewItem(t *testing.T) { + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + totalLimit int + limitByTimestamp int + keysAlreadyInCache []string + expectedResult bool + }{ + "No items in cache and timestamp limit hasn't been exhausted": {2, 1, nil, true}, + "Cache is full and timestamp limit hasn't been exhausted": {2, 1, []string{"qwerty1", "qwerty2"}, false}, + "No items in cache and timestamp limit has been exhausted": {2, 0, nil, false}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + filter, err := NewItemCardinalityFilter(metricName, testCase.totalLimit, testCase.limitByTimestamp, + itemActivityPeriod, logger) + require.NoError(t, err) + filterCasted := filter.(*itemCardinalityFilter) + defer executeShutdown(t, filterCasted) + + for _, key := range testCase.keysAlreadyInCache { + err = filterCasted.cache.Set(key, byte(1)) + require.NoError(t, err) + } + + assert.Equal(t, testCase.expectedResult, filterCasted.canIncludeNewItem(testCase.limitByTimestamp)) + }) + } +} + +func TestItemCardinalityFilter_Shutdown(t *testing.T) { + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + closeCache bool + expectError bool + }{ + "Happy path": {false, false}, + "Cache has been closed": {true, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + filter, err := NewItemCardinalityFilter(metricName, totalLimit, limitByTimestamp, itemActivityPeriod, logger) + require.NoError(t, err) + filterCasted := filter.(*itemCardinalityFilter) + + if testCase.closeCache { + // Covering case when by some reasons cache is closed + err = filterCasted.cache.Close() + 
require.NoError(t, err) + } + + if testCase.expectError { + require.Error(t, filter.Shutdown()) + } else { + require.NoError(t, filter.Shutdown()) + } + }) + } +} + +func TestItemCardinalityFilter_Filter(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on Windows due to https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32397") + } + items := initialItems(t) + logger := zaptest.NewLogger(t) + filter, err := NewItemCardinalityFilter(metricName, totalLimit, limitByTimestamp, itemActivityPeriod, logger) + require.NoError(t, err) + filterCasted := filter.(*itemCardinalityFilter) + defer executeShutdown(t, filterCasted) + + filteredItems, err := filter.Filter(items) + require.NoError(t, err) + + // Items with key3 and key6 must be not present in filtered items + assertInitialFiltering(t, expectedFilteredInitialItems(t), filteredItems) + + items = additionalTestData(t) + filteredItems, err = filter.Filter(items) + require.NoError(t, err) + + // Cache timeout hasn't been reached, so filtered out all items + assert.Empty(t, filteredItems) + + // Doing this to avoid of relying on timeouts and sleeps(avoid potential flaky tests) + syncChannel := make(chan bool, 10) + + filterCasted.cache.SetExpirationCallback(func(string, any) { + if filterCasted.cache.Count() > 0 { + // Waiting until cache is really empty - all items are expired + return + } + syncChannel <- true + }) + + <-syncChannel + + filterCasted.cache.SetExpirationCallback(nil) + + filteredItems, err = filter.Filter(items) + require.NoError(t, err) + + // All entries expired, nothing should be filtered out from items + assertInitialFiltering(t, items, filteredItems) + + // Test filtering when cache was closed + filter, err = NewItemCardinalityFilter(metricName, totalLimit, limitByTimestamp, itemActivityPeriod, logger) + require.NoError(t, err) + require.NoError(t, filter.Shutdown()) + + filteredItems, err = filter.Filter(items) + + require.Error(t, err) + 
require.Nil(t, filteredItems) +} + +func TestItemCardinalityFilter_FilterItems(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on Windows due to https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32397") + } + items := initialItemsWithSameTimestamp(t) + logger := zaptest.NewLogger(t) + filter, err := NewItemCardinalityFilter(metricName, totalLimit, limitByTimestamp, itemActivityPeriod, logger) + require.NoError(t, err) + filterCasted := filter.(*itemCardinalityFilter) + defer executeShutdown(t, filterCasted) + + filteredItems, err := filterCasted.filterItems(items) + require.NoError(t, err) + + // Items with key1 and key2 must be not present in filtered items + assertInitialFiltering(t, expectedFilteredInitialItemsWithSameTimestamp(t), filteredItems) + + // 2 new and 2 existing items must be present in filtered items + filteredItems, err = filterCasted.filterItems(items) + require.NoError(t, err) + + assert.Len(t, filteredItems, totalLimit) + + filteredItems, err = filter.Filter(items) + require.NoError(t, err) + + // Cache timeout hasn't been reached, so no more new items expected + assert.Len(t, filteredItems, totalLimit) + + // Doing this to avoid of relying on timeouts and sleeps(avoid potential flaky tests) + syncChannel := make(chan bool, 10) + + filterCasted.cache.SetExpirationCallback(func(string, any) { + if filterCasted.cache.Count() > 0 { + // Waiting until cache is really empty - all items are expired + return + } + syncChannel <- true + }) + + <-syncChannel + + filterCasted.cache.SetExpirationCallback(nil) + + filteredItems, err = filter.Filter(items) + require.NoError(t, err) + + // All entries expired, same picture as on first case + assertInitialFiltering(t, expectedFilteredInitialItemsWithSameTimestamp(t), filteredItems) + + // Test filtering when cache was closed + filter, err = NewItemCardinalityFilter(metricName, totalLimit, limitByTimestamp, itemActivityPeriod, logger) + require.NoError(t, 
err) + require.NoError(t, filter.Shutdown()) + + filteredItems, err = filter.Filter(items) + + require.Error(t, err) + require.Nil(t, filteredItems) +} + +func TestItemCardinalityFilter_IncludeItem(t *testing.T) { + timestamp := time.Now().UTC() + item1 := &Item{SeriesKey: key1, Timestamp: timestamp} + item2 := &Item{SeriesKey: key2, Timestamp: timestamp} + logger := zaptest.NewLogger(t) + filter, err := NewItemCardinalityFilter(metricName, totalLimit, limitByTimestamp, itemActivityPeriod, logger) + require.NoError(t, err) + filterCasted := filter.(*itemCardinalityFilter) + defer executeShutdown(t, filterCasted) + timestampLimiter := ¤tLimitByTimestamp{ + limitByTimestamp: 1, + } + + result, err := filterCasted.includeItem(item1, timestampLimiter) + require.NoError(t, err) + assert.True(t, result) + + // Item already exists in cache + result, err = filterCasted.includeItem(item1, timestampLimiter) + require.NoError(t, err) + assert.True(t, result) + + // Limit by timestamp reached + result, err = filterCasted.includeItem(item2, timestampLimiter) + require.NoError(t, err) + assert.False(t, result) + + // Test with closed cache - do not need to execute shutdown in this case + filter, err = NewItemCardinalityFilter(metricName, totalLimit, limitByTimestamp, itemActivityPeriod, logger) + require.NoError(t, err) + filterCasted = filter.(*itemCardinalityFilter) + require.NoError(t, filterCasted.cache.Close()) + result, err = filterCasted.includeItem(item1, timestampLimiter) + require.Error(t, err) + assert.False(t, result) +} + +func TestGroupByTimestamp(t *testing.T) { + timestamp1, err := time.Parse(timestampLayout, timestamp1Str) + require.NoError(t, err) + timestamp2, err := time.Parse(timestampLayout, timestamp2Str) + require.NoError(t, err) + timestamp3, err := time.Parse(timestampLayout, timestamp3Str) + require.NoError(t, err) + + items := initialItems(t) + groupedItems := groupByTimestamp(items) + + assert.Len(t, groupedItems, 3) + assertGroupedByKey(t, items, 
groupedItems, timestamp1, 0) + assertGroupedByKey(t, items, groupedItems, timestamp2, 3) + assertGroupedByKey(t, items, groupedItems, timestamp3, 6) +} + +func TestSortedKeys(t *testing.T) { + timestamp1, err := time.Parse(timestampLayout, timestamp1Str) + require.NoError(t, err) + timestamp2, err := time.Parse(timestampLayout, timestamp2Str) + require.NoError(t, err) + timestamp3, err := time.Parse(timestampLayout, timestamp3Str) + require.NoError(t, err) + + data := map[time.Time][]*Item{ + timestamp3: {{SeriesKey: key3, Timestamp: timestamp3}}, + timestamp1: {{SeriesKey: key1, Timestamp: timestamp1}}, + timestamp2: {{SeriesKey: key2, Timestamp: timestamp2}}, + } + + keys := sortedKeys(data) + + assert.Equal(t, len(data), len(keys)) + assert.Equal(t, timestamp1, keys[0]) + assert.Equal(t, timestamp2, keys[1]) + assert.Equal(t, timestamp3, keys[2]) +} + +func TestCurrentLimitByTimestamp_Get(t *testing.T) { + timestampLimiter := &currentLimitByTimestamp{ + limitByTimestamp: limitByTimestamp, + } + + assert.Equal(t, limitByTimestamp, timestampLimiter.get()) +} + +func TestCurrentLimitByTimestamp_Dec(t *testing.T) { + timestampLimiter := &currentLimitByTimestamp{ + limitByTimestamp: limitByTimestamp, + } + + timestampLimiter.dec() + + assert.Equal(t, limitByTimestamp-1, timestampLimiter.limitByTimestamp) +} + +func executeShutdown(t *testing.T, filter *itemCardinalityFilter) { + require.NoError(t, filter.Shutdown()) +} diff --git a/receiver/googlecloudspannerreceiver/internal/filter/nopitemcardinality.go b/receiver/googlecloudspannerreceiver/internal/filter/nopitemcardinality.go new file mode 100644 index 000000000000..16b6568fdeda --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filter/nopitemcardinality.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filter // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" + +type 
nopItemCardinalityFilter struct { + // No fields here +} + +type nopItemFilterResolver struct { + nopFilter *nopItemCardinalityFilter +} + +func NewNopItemCardinalityFilter() ItemFilter { + return &nopItemCardinalityFilter{} +} + +func NewNopItemFilterResolver() ItemFilterResolver { + return &nopItemFilterResolver{ + nopFilter: &nopItemCardinalityFilter{}, + } +} + +func (f *nopItemCardinalityFilter) Filter(sourceItems []*Item) ([]*Item, error) { + return sourceItems, nil +} + +func (f *nopItemCardinalityFilter) Shutdown() error { + return nil +} + +func (f *nopItemCardinalityFilter) TotalLimit() int { + return 0 +} + +func (f *nopItemCardinalityFilter) LimitByTimestamp() int { + return 0 +} + +func (r *nopItemFilterResolver) Resolve(string) (ItemFilter, error) { + return r.nopFilter, nil +} + +func (r *nopItemFilterResolver) Shutdown() error { + return nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/filter/nopitemcardinality_test.go b/receiver/googlecloudspannerreceiver/internal/filter/nopitemcardinality_test.go new file mode 100644 index 000000000000..348f314eed9d --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filter/nopitemcardinality_test.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filter + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNopItemCardinalityFilter_Filter(t *testing.T) { + filter := NewNopItemCardinalityFilter() + sourceItems := []*Item{{}} + + filteredItems, err := filter.Filter(sourceItems) + + require.NoError(t, err) + assert.Equal(t, sourceItems, filteredItems) +} + +func TestNopItemCardinalityFilter_Shutdown(t *testing.T) { + filter := &nopItemCardinalityFilter{} + + err := filter.Shutdown() + + require.NoError(t, err) +} + +func TestNopItemCardinalityFilter_TotalLimit(t *testing.T) { + filter := &nopItemCardinalityFilter{} + + assert.Equal(t, 0, 
filter.TotalLimit()) +} + +func TestNopItemCardinalityFilter_LimitByTimestamp(t *testing.T) { + filter := &nopItemCardinalityFilter{} + + assert.Equal(t, 0, filter.LimitByTimestamp()) +} + +func TestNopItemFilterResolver_Resolve(t *testing.T) { + itemFilterResolver := NewNopItemFilterResolver() + + itemFilter, err := itemFilterResolver.Resolve("test") + + require.NoError(t, err) + assert.Equal(t, &nopItemCardinalityFilter{}, itemFilter) +} + +func TestNopItemFilterResolver_Shutdown(t *testing.T) { + itemFilterResolver := NewNopItemFilterResolver() + + err := itemFilterResolver.Shutdown() + + require.NoError(t, err) +} diff --git a/receiver/googlecloudspannerreceiver/internal/filter/package_test.go b/receiver/googlecloudspannerreceiver/internal/filter/package_test.go new file mode 100644 index 000000000000..cbe47ab4d895 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filter/package_test.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filter + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/ReneKroon/ttlcache/v2.(*Cache).checkExpirationCallback")) +} diff --git a/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go b/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go new file mode 100644 index 000000000000..ab7d755ae582 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go @@ -0,0 +1,139 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filter + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + key1 = "key1" + key2 = "key2" + key3 = "key3" + key4 = "key4" + key5 = "key5" + key6 = "key6" + + metricName = "metricName" + + totalLimit = 4 + limitByTimestamp = 2 + + 
itemActivityPeriod = 50 * time.Millisecond + + timestampLayout = "2006-01-02T15:04:05.000Z" + + timestamp1Str = "2021-10-13T20:30:00.000Z" + timestamp2Str = "2021-10-13T20:30:05.000Z" + timestamp3Str = "2021-10-13T20:30:10.000Z" + timestamp4Str = "2021-10-13T20:30:20.000Z" +) + +func assertGroupedByKey(t *testing.T, items []*Item, groupedItems map[time.Time][]*Item, key time.Time, offsetInItems int) { + assert.Len(t, groupedItems[key], 3) + + for i := 0; i < 3; i++ { + assert.Equal(t, items[i+offsetInItems].SeriesKey, groupedItems[key][i].SeriesKey) + } +} + +func assertInitialFiltering(t *testing.T, expected []*Item, actual []*Item) { + require.Equal(t, len(expected), len(actual)) + for i, expectedItem := range expected { + assert.Equal(t, expectedItem.SeriesKey, actual[i].SeriesKey) + assert.Equal(t, expectedItem.Timestamp, actual[i].Timestamp) + } +} + +func initialItems(t *testing.T) []*Item { + timestamp1, err := time.Parse(timestampLayout, timestamp1Str) + require.NoError(t, err) + timestamp2, err := time.Parse(timestampLayout, timestamp2Str) + require.NoError(t, err) + timestamp3, err := time.Parse(timestampLayout, timestamp3Str) + require.NoError(t, err) + + data := []*Item{ + {SeriesKey: key1, Timestamp: timestamp1}, + {SeriesKey: key2, Timestamp: timestamp1}, + {SeriesKey: key3, Timestamp: timestamp1}, + + {SeriesKey: key1, Timestamp: timestamp2}, + {SeriesKey: key2, Timestamp: timestamp2}, + {SeriesKey: key5, Timestamp: timestamp2}, + + {SeriesKey: key4, Timestamp: timestamp3}, + {SeriesKey: key5, Timestamp: timestamp3}, + {SeriesKey: key6, Timestamp: timestamp3}, + } + + return data +} + +func expectedFilteredInitialItems(t *testing.T) []*Item { + timestamp1, err := time.Parse(timestampLayout, timestamp1Str) + require.NoError(t, err) + timestamp2, err := time.Parse(timestampLayout, timestamp2Str) + require.NoError(t, err) + timestamp3, err := time.Parse(timestampLayout, timestamp3Str) + require.NoError(t, err) + + data := []*Item{ + {SeriesKey: key1, 
Timestamp: timestamp1}, + {SeriesKey: key2, Timestamp: timestamp1}, + + {SeriesKey: key1, Timestamp: timestamp2}, + {SeriesKey: key2, Timestamp: timestamp2}, + {SeriesKey: key5, Timestamp: timestamp2}, + + {SeriesKey: key4, Timestamp: timestamp3}, + {SeriesKey: key5, Timestamp: timestamp3}, + } + + return data +} + +func additionalTestData(t *testing.T) []*Item { + timestamp, err := time.Parse(timestampLayout, timestamp4Str) + require.NoError(t, err) + + data := []*Item{ + {SeriesKey: key3, Timestamp: timestamp}, + {SeriesKey: key6, Timestamp: timestamp}, + } + + return data +} + +func initialItemsWithSameTimestamp(t *testing.T) []*Item { + timestamp, err := time.Parse(timestampLayout, timestamp1Str) + require.NoError(t, err) + + data := []*Item{ + {SeriesKey: key1, Timestamp: timestamp}, + {SeriesKey: key2, Timestamp: timestamp}, + {SeriesKey: key3, Timestamp: timestamp}, + {SeriesKey: key4, Timestamp: timestamp}, + {SeriesKey: key5, Timestamp: timestamp}, + {SeriesKey: key6, Timestamp: timestamp}, + } + + return data +} + +func expectedFilteredInitialItemsWithSameTimestamp(t *testing.T) []*Item { + timestamp, err := time.Parse(timestampLayout, timestamp1Str) + require.NoError(t, err) + + data := []*Item{ + {SeriesKey: key1, Timestamp: timestamp}, + {SeriesKey: key2, Timestamp: timestamp}, + } + + return data +} diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder.go new file mode 100644 index 000000000000..8e7827734b52 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder.go @@ -0,0 +1,139 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterfactory // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filterfactory" + +import ( + "errors" + + "go.uber.org/zap" + + 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type filterBuilder struct { + logger *zap.Logger + config *ItemFilterFactoryConfig +} + +func (b filterBuilder) buildFilterByMetricZeroTotalLimit() map[string]filter.ItemFilter { + filterByMetric := make(map[string]filter.ItemFilter) + nopFilter := filter.NewNopItemCardinalityFilter() + + for _, metadataItem := range b.config.MetadataItems { + for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { + metricFullName := metadataItem.MetricNamePrefix + metricValueMetadata.Name() + filterByMetric[metricFullName] = nopFilter + } + } + + return filterByMetric +} + +func (b filterBuilder) buildFilterByMetricPositiveTotalLimit() (map[string]filter.ItemFilter, error) { + filterByMetric := make(map[string]filter.ItemFilter) + groupedItems := groupByCardinality(b.config.MetadataItems) + + // Handle metric groups with low cardinality + lowCardinalityGroups := groupedItems[false] + newTotalLimit, err := b.handleLowCardinalityGroups(lowCardinalityGroups, b.config.TotalLimit, filterByMetric) + if err != nil { + return nil, err + } + + // Handle metric groups with high cardinality + highCardinalityGroups := groupedItems[true] + newTotalLimit, err = b.handleHighCardinalityGroups(highCardinalityGroups, newTotalLimit, filterByMetric) + if err != nil { + return nil, err + } + + b.logger.Debug("Remaining total limit after cardinality limits calculation", + zap.Int("remainingTotalLimit", newTotalLimit)) + + return filterByMetric, nil +} + +func (b filterBuilder) handleLowCardinalityGroups(groups []*metadata.MetricsMetadata, remainingTotalLimit int, + filterByMetric map[string]filter.ItemFilter) (int, error) { + + if len(groups) == 0 { + return remainingTotalLimit, nil + } + + limitPerMetricByTimestamp := 
b.config.ProjectAmount * b.config.InstanceAmount * b.config.DatabaseAmount + + // For low cardinality metrics total limit is equal to limit by timestamp + b.logger.Debug("Calculated cardinality limits for low cardinality metric group", + zap.Int("limitPerMetricByTimestamp", limitPerMetricByTimestamp)) + + return b.constructFiltersForGroups(limitPerMetricByTimestamp, limitPerMetricByTimestamp, groups, remainingTotalLimit, filterByMetric) +} + +func (b filterBuilder) handleHighCardinalityGroups(groups []*metadata.MetricsMetadata, remainingTotalLimit int, + filterByMetric map[string]filter.ItemFilter) (int, error) { + + if len(groups) == 0 { + return remainingTotalLimit, nil + } + + totalLimitPerMetric := remainingTotalLimit / countMetricsInGroups(groups) + limitPerMetricByTimestamp := totalLimitPerMetric / defaultMetricDataPointsAmountInPeriod + + b.logger.Debug("Calculated cardinality limits for high cardinality metric group", + zap.Int("limitPerMetricByTimestamp", limitPerMetricByTimestamp), + zap.Int("totalLimitPerMetric", totalLimitPerMetric)) + + if limitPerMetricByTimestamp < 1 { + return remainingTotalLimit, errors.New("limit per metric per timestamp for high cardinality metrics is lower than 1") + } + + return b.constructFiltersForGroups(totalLimitPerMetric, limitPerMetricByTimestamp, groups, remainingTotalLimit, filterByMetric) +} + +func (b filterBuilder) constructFiltersForGroups(totalLimitPerMetric int, limitPerMetricByTimestamp int, + groups []*metadata.MetricsMetadata, remainingTotalLimit int, filterByMetric map[string]filter.ItemFilter) (int, error) { + + newTotalLimit := remainingTotalLimit + + for _, metadataItem := range groups { + for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { + newTotalLimit -= totalLimitPerMetric + metricFullName := metadataItem.MetricNamePrefix + metricValueMetadata.Name() + + b.logger.Debug("Setting cardinality limits for metric", + zap.String("metricFullName", metricFullName), + 
zap.Int("limitPerMetricByTimestamp", limitPerMetricByTimestamp), + zap.Int("totalLimitPerMetric", totalLimitPerMetric), + zap.Int("remainingTotalLimit", newTotalLimit)) + + itemFilter, err := filter.NewItemCardinalityFilter(metricFullName, totalLimitPerMetric, + limitPerMetricByTimestamp, defaultItemActivityPeriod, b.logger) + if err != nil { + return remainingTotalLimit, err + } + filterByMetric[metricFullName] = itemFilter + } + } + + return newTotalLimit, nil +} + +func countMetricsInGroups(metadataItems []*metadata.MetricsMetadata) (amount int) { + for _, metadataItem := range metadataItems { + amount += len(metadataItem.QueryMetricValuesMetadata) + } + + return amount +} + +func groupByCardinality(metadataItems []*metadata.MetricsMetadata) map[bool][]*metadata.MetricsMetadata { + groupedItems := make(map[bool][]*metadata.MetricsMetadata) + + for _, metadataItem := range metadataItems { + groupedItems[metadataItem.HighCardinality] = append(groupedItems[metadataItem.HighCardinality], metadataItem) + } + + return groupedItems +} diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder_test.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder_test.go new file mode 100644 index 000000000000..7703fb4f5320 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder_test.go @@ -0,0 +1,268 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterfactory + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" +) + +func TestFilterBuilder_BuildFilterByMetricZeroTotalLimit(t *testing.T) { + logger := zaptest.NewLogger(t) + metricPrefixes := []string{prefix1, prefix2} + prefixHighCardinality := []bool{true, true} + metadataItems := 
generateMetadataItems(metricPrefixes, prefixHighCardinality) + config := &ItemFilterFactoryConfig{ + MetadataItems: metadataItems, + } + nopItemFilter := filter.NewNopItemCardinalityFilter() + builder := filterBuilder{ + logger: logger, + config: config, + } + + result := builder.buildFilterByMetricZeroTotalLimit() + + // Because we have 2 groups and each group has 2 metrics + assert.Len(t, result, len(metricPrefixes)*2) + for _, metadataItem := range metadataItems { + for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { + f, exists := result[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] + assert.True(t, exists) + assert.Equal(t, nopItemFilter, f) + } + } +} + +func TestFilterBuilder_BuildFilterByMetricPositiveTotalLimit(t *testing.T) { + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + metricPrefixes []string + prefixHighCardinality []bool + totalLimit int + projectAmount int + instanceAmount int + databaseAmount int + expectedHighCardinalityTotalLimit int + expectedHighCardinalityLimitByTimestamp int + expectError bool + }{ + "Happy path with 2 high cardinality groups": {[]string{prefix1, prefix2}, []bool{true, true}, 200 * defaultMetricDataPointsAmountInPeriod, 1, 2, 5, 72000, 50, false}, + "Happy path with 2 low cardinality groups": {[]string{prefix1, prefix2}, []bool{false, false}, 200, 1, 2, 5, 0, 0, false}, + "Happy path with 1 low and 1 high cardinality groups": {[]string{prefix1, prefix2}, []bool{false, true}, 200*defaultMetricDataPointsAmountInPeriod + 20, 1, 2, 5, 144000, 100, false}, + "Error when limit by timestamp is lower than 1 for high cardinality groups": {[]string{prefix1, prefix2}, []bool{true, true}, 200, 1, 2, 5, 0, 0, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metadataItems := generateMetadataItems(testCase.metricPrefixes, testCase.prefixHighCardinality) + config := &ItemFilterFactoryConfig{ + MetadataItems: metadataItems, + TotalLimit: 
testCase.totalLimit, + ProjectAmount: testCase.projectAmount, + InstanceAmount: testCase.instanceAmount, + DatabaseAmount: testCase.databaseAmount, + } + builder := filterBuilder{ + logger: logger, + config: config, + } + + result, err := builder.buildFilterByMetricPositiveTotalLimit() + if testCase.expectError { + require.Error(t, err) + return + } + require.NoError(t, err) + + // Because we have 2 groups and each group has 2 metrics + assert.Len(t, result, len(testCase.metricPrefixes)*2) + for _, metadataItem := range metadataItems { + for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { + f, exists := result[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] + assert.True(t, exists) + if metadataItem.HighCardinality { + assert.Equal(t, testCase.expectedHighCardinalityTotalLimit, f.TotalLimit()) + assert.Equal(t, testCase.expectedHighCardinalityLimitByTimestamp, f.LimitByTimestamp()) + } else { + // For low cardinality group both limits are equal to projectAmount * instanceAmount * databaseAmount + expectedLimit := testCase.projectAmount * testCase.instanceAmount * testCase.databaseAmount + assert.Equal(t, expectedLimit, f.TotalLimit()) + assert.Equal(t, expectedLimit, f.LimitByTimestamp()) + } + assert.NoError(t, f.Shutdown()) + } + } + }) + } +} + +func TestFilterBuilder_HandleLowCardinalityGroups(t *testing.T) { + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + metricPrefixes []string + prefixHighCardinality []bool + totalLimit int + projectAmount int + instanceAmount int + databaseAmount int + expectedRemainingTotalLimit int + }{ + "With 2 low cardinality groups": {[]string{prefix1, prefix2}, []bool{false, false}, 50, 1, 2, 5, 10}, + "With 0 low cardinality groups": {[]string{}, []bool{}, 50, 1, 2, 5, 50}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metadataItems := generateMetadataItems(testCase.metricPrefixes, testCase.prefixHighCardinality) + config := 
&ItemFilterFactoryConfig{ + MetadataItems: metadataItems, + TotalLimit: testCase.totalLimit, + ProjectAmount: testCase.projectAmount, + InstanceAmount: testCase.instanceAmount, + DatabaseAmount: testCase.databaseAmount, + } + builder := filterBuilder{ + logger: logger, + config: config, + } + + filterByMetric := make(map[string]filter.ItemFilter) + remainingTotalLimit, err := builder.handleLowCardinalityGroups(metadataItems, testCase.totalLimit, filterByMetric) + require.NoError(t, err) + + // Because we have 2 groups and each group has 2 metrics + assert.Len(t, filterByMetric, len(testCase.metricPrefixes)*2) + for _, metadataItem := range metadataItems { + for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { + f, exists := filterByMetric[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] + assert.True(t, exists) + // For low cardinality group both limits are equal to projectAmount * instanceAmount * databaseAmount + expectedLimit := testCase.projectAmount * testCase.instanceAmount * testCase.databaseAmount + assert.Equal(t, expectedLimit, f.TotalLimit()) + assert.Equal(t, expectedLimit, f.LimitByTimestamp()) + assert.Equal(t, testCase.expectedRemainingTotalLimit, remainingTotalLimit) + assert.NoError(t, f.Shutdown()) + } + } + }) + } +} + +func TestFilterBuilder_HandleHighCardinalityGroups(t *testing.T) { + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + metricPrefixes []string + prefixHighCardinality []bool + totalLimit int + expectedHighCardinalityTotalLimit int + expectedHighCardinalityLimitByTimestamp int + expectedRemainingTotalLimit int + expectError bool + }{ + "With 2 high cardinality groups": {[]string{prefix1, prefix2}, []bool{true, true}, 200 * defaultMetricDataPointsAmountInPeriod, 72000, 50, 0, false}, + "With zero high cardinality groups": {[]string{}, []bool{}, 200, 0, 0, 200, false}, + "Error when limit by timestamp is lower than 1 for high cardinality groups": {[]string{prefix1, prefix2}, 
[]bool{true, true}, 200, 0, 0, 200, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metadataItems := generateMetadataItems(testCase.metricPrefixes, testCase.prefixHighCardinality) + config := &ItemFilterFactoryConfig{ + MetadataItems: metadataItems, + TotalLimit: testCase.totalLimit, + ProjectAmount: 1, + InstanceAmount: 2, + DatabaseAmount: 5, + } + builder := filterBuilder{ + logger: logger, + config: config, + } + filterByMetric := make(map[string]filter.ItemFilter) + remainingTotalLimit, err := builder.handleHighCardinalityGroups(metadataItems, testCase.totalLimit, filterByMetric) + if testCase.expectError { + require.Error(t, err) + return + } + require.NoError(t, err) + + // Because we have 2 groups and each group has 2 metrics + assert.Len(t, filterByMetric, len(testCase.metricPrefixes)*2) + for _, metadataItem := range metadataItems { + for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { + f, exists := filterByMetric[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] + assert.True(t, exists) + assert.Equal(t, testCase.expectedHighCardinalityTotalLimit, f.TotalLimit()) + assert.Equal(t, testCase.expectedHighCardinalityLimitByTimestamp, f.LimitByTimestamp()) + assert.Equal(t, testCase.expectedRemainingTotalLimit, remainingTotalLimit) + assert.NoError(t, f.Shutdown()) + } + } + }) + } +} + +func TestFilterBuilder_TestConstructFiltersForGroups(t *testing.T) { + logger := zaptest.NewLogger(t) + metricPrefixes := []string{prefix1, prefix2} + prefixHighCardinality := []bool{true, true} + metadataItems := generateMetadataItems(metricPrefixes, prefixHighCardinality) + config := &ItemFilterFactoryConfig{ + MetadataItems: metadataItems, + } + builder := filterBuilder{ + logger: logger, + config: config, + } + filterByMetric := make(map[string]filter.ItemFilter) + const totalLimitPerMetric, limitPerMetricByTimestamp, remainingTotalLimit, expectedRemainingTotalLimit = 50, 10, 200, 0 + + result, 
err := builder.constructFiltersForGroups(totalLimitPerMetric, limitPerMetricByTimestamp, metadataItems, + remainingTotalLimit, filterByMetric) + require.NoError(t, err) + + // Because we have 2 groups and each group has 2 metrics + assert.Len(t, filterByMetric, len(metricPrefixes)*2) + for _, metadataItem := range metadataItems { + for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { + f, exists := filterByMetric[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] + assert.True(t, exists) + assert.Equal(t, totalLimitPerMetric, f.TotalLimit()) + assert.Equal(t, limitPerMetricByTimestamp, f.LimitByTimestamp()) + assert.Equal(t, expectedRemainingTotalLimit, result) + assert.NoError(t, f.Shutdown()) + } + } +} + +func TestCountMetricsInGroups(t *testing.T) { + metricPrefixes := []string{prefix1, prefix2} + prefixHighCardinality := []bool{true, true} + metadataItems := generateMetadataItems(metricPrefixes, prefixHighCardinality) + + assert.Equal(t, 4, countMetricsInGroups(metadataItems)) +} + +func TestGroupByCardinality(t *testing.T) { + metricPrefixes := []string{"prefix1-", "prefix2-"} + prefixHighCardinality := []bool{false, true} + metadataItems := generateMetadataItems(metricPrefixes, prefixHighCardinality) + + result := groupByCardinality(metadataItems) + + assert.Len(t, result, 2) + + for _, metadataItem := range metadataItems { + groups, exists := result[metadataItem.HighCardinality] + assert.True(t, exists) + assert.Len(t, groups, 1) + assert.Equal(t, metadataItem, groups[0]) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/itemfilterfactory.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/itemfilterfactory.go new file mode 100644 index 000000000000..3d4117eb7a7d --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/itemfilterfactory.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterfactory // 
import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filterfactory" + +import ( + "errors" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + defaultMetricDataPointsAmountInPeriod = 24 * 60 + defaultItemActivityPeriod = 24 * time.Hour +) + +type itemFilterFactory struct { + filterByMetric map[string]filter.ItemFilter +} + +type ItemFilterFactoryConfig struct { + MetadataItems []*metadata.MetricsMetadata + TotalLimit int + ProjectAmount int + InstanceAmount int + DatabaseAmount int +} + +func NewItemFilterResolver(logger *zap.Logger, config *ItemFilterFactoryConfig) (filter.ItemFilterResolver, error) { + if err := config.validate(); err != nil { + return nil, err + } + + builder := filterBuilder{ + logger: logger, + config: config, + } + + if config.TotalLimit == 0 { + return &itemFilterFactory{ + filterByMetric: builder.buildFilterByMetricZeroTotalLimit(), + }, nil + } + + filterByMetric, err := builder.buildFilterByMetricPositiveTotalLimit() + if err != nil { + return nil, err + } + + return &itemFilterFactory{ + filterByMetric: filterByMetric, + }, nil +} + +func (config *ItemFilterFactoryConfig) validate() error { + if len(config.MetadataItems) == 0 { + return errors.New("metadata items cannot be empty or nil") + } + + if config.TotalLimit != 0 && config.TotalLimit <= (config.ProjectAmount*config.InstanceAmount*config.DatabaseAmount) { + return errors.New("total limit is too low and doesn't cover configured projects * instances * databases") + } + + return nil +} + +func (f *itemFilterFactory) Resolve(metricFullName string) (filter.ItemFilter, error) { + itemFilter, exists := f.filterByMetric[metricFullName] + + if !exists { + return nil, 
fmt.Errorf("can't find item filter for metric with full name %q", metricFullName) + } + + return itemFilter, nil +} + +func (f *itemFilterFactory) Shutdown() error { + for _, itemFilter := range f.filterByMetric { + err := itemFilter.Shutdown() + if err != nil { + return err + } + } + + return nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/itemfilterfactory_test.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/itemfilterfactory_test.go new file mode 100644 index 000000000000..3f07d8b6cebe --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/itemfilterfactory_test.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterfactory + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +func TestNewItemFilterResolver(t *testing.T) { + logger := zaptest.NewLogger(t) + metricPrefixes := []string{prefix1, prefix2} + prefixHighCardinality := []bool{true, true} + metadataItems := generateMetadataItems(metricPrefixes, prefixHighCardinality) + testCases := map[string]struct { + totalLimit int + expectError bool + }{ + "Total limit is zero": {0, false}, + "Total limit is positive": {200 * defaultMetricDataPointsAmountInPeriod, false}, + "Total limit is lover then product of amounts": {3, true}, + "Error when limit by timestamp is lower than 1 for high cardinality groups": {20, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + config := &ItemFilterFactoryConfig{ + MetadataItems: metadataItems, + TotalLimit: testCase.totalLimit, + ProjectAmount: 1, + 
InstanceAmount: 2, + DatabaseAmount: 5, + } + + factory, err := NewItemFilterResolver(logger, config) + + if testCase.expectError { + require.Error(t, err) + require.Nil(t, factory) + } else { + require.NoError(t, err) + require.NoError(t, factory.Shutdown()) + } + }) + } +} + +func TestItemFilterFactoryConfig_Validate(t *testing.T) { + testCases := map[string]struct { + metadataItems []*metadata.MetricsMetadata + totalLimit int + projectAmount int + instanceAmount int + databaseAmount int + expectError bool + }{ + "No metadata items": {[]*metadata.MetricsMetadata{}, 10, 1, 1, 1, true}, + "Total limit is zero": {[]*metadata.MetricsMetadata{{}}, 0, 1, 1, 1, false}, + "Total limit is lover then product of amounts": {[]*metadata.MetricsMetadata{{}}, 3, 1, 2, 3, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + config := &ItemFilterFactoryConfig{ + MetadataItems: testCase.metadataItems, + TotalLimit: testCase.totalLimit, + ProjectAmount: testCase.projectAmount, + InstanceAmount: testCase.instanceAmount, + DatabaseAmount: testCase.databaseAmount, + } + + err := config.validate() + + if testCase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestItemFilterFactory_Resolve(t *testing.T) { + itemFilter := filter.NewNopItemCardinalityFilter() + testCases := map[string]struct { + filterByMetric map[string]filter.ItemFilter + expectError bool + }{ + "Filter cannot be resolved": {map[string]filter.ItemFilter{}, true}, + "Filter can be resolved": {map[string]filter.ItemFilter{metricFullName: itemFilter}, false}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + factory := &itemFilterFactory{ + filterByMetric: testCase.filterByMetric, + } + + resolvedFilter, err := factory.Resolve(metricFullName) + + if testCase.expectError { + require.Error(t, err) + require.Nil(t, resolvedFilter) + } else { + require.NoError(t, err) + assert.Equal(t, itemFilter, 
resolvedFilter) + } + }) + } +} + +func TestItemFilterFactory_Shutdown(t *testing.T) { + testCases := map[string]struct { + expectedError error + }{ + "Error": {errors.New("error on shutdown")}, + "Happy path": {nil}, + } + + for name, testCase := range testCases { + mf := &mockFilter{} + t.Run(name, func(t *testing.T) { + factory := &itemFilterFactory{ + filterByMetric: map[string]filter.ItemFilter{metricFullName: mf}, + } + + mf.On("Shutdown").Return(testCase.expectedError) + + _ = factory.Shutdown() + + mf.AssertExpectations(t) + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/package_test.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/package_test.go new file mode 100644 index 000000000000..f35caa4efc4f --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/package_test.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterfactory + +import ( + "testing" + + "go.uber.org/goleak" +) + +// See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information on ignore. 
+func TestMain(m *testing.M) { + goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start")) +} diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/testhelpers_test.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/testhelpers_test.go new file mode 100644 index 000000000000..74a5d4ac1c57 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/testhelpers_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterfactory + +import ( + "github.com/stretchr/testify/mock" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + metricFullName = "metricFullName" + prefix1 = "prefix1-" + prefix2 = "prefix2-" +) + +type mockFilter struct { + mock.Mock +} + +func (f *mockFilter) Filter(source []*filter.Item) ([]*filter.Item, error) { + return source, nil +} + +func (f *mockFilter) Shutdown() error { + args := f.Called() + return args.Error(0) +} + +func (f *mockFilter) TotalLimit() int { + return 0 +} + +func (f *mockFilter) LimitByTimestamp() int { + return 0 +} + +func generateMetadataItems(prefixes []string, prefixHighCardinality []bool) []*metadata.MetricsMetadata { + metricDataType := metadata.NewMetricType(pmetric.MetricTypeGauge, pmetric.AggregationTemporalityUnspecified, false) + metadataItems := make([]*metadata.MetricsMetadata, len(prefixes)) + int64MetricValueMetadata, _ := metadata.NewMetricValueMetadata("int64", "int64Column", metricDataType, "int64Unit", metadata.IntValueType) + float64MetricValueMetadata, _ := metadata.NewMetricValueMetadata("float64", "float64Column", metricDataType, "float64Unit", metadata.FloatValueType) + + for i, prefix := 
range prefixes { + metadataItems[i] = &metadata.MetricsMetadata{ + MetricNamePrefix: prefix, + HighCardinality: prefixHighCardinality[i], + QueryMetricValuesMetadata: []metadata.MetricValueMetadata{ + int64MetricValueMetadata, + float64MetricValueMetadata, + }, + } + } + + return metadataItems +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/generated_status.go b/receiver/googlecloudspannerreceiver/internal/metadata/generated_status.go new file mode 100644 index 000000000000..0a73d082627c --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/generated_status.go @@ -0,0 +1,16 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("googlecloudspanner") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver" +) + +const ( + MetricsStability = component.StabilityLevelBeta +) diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue.go b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue.go new file mode 100644 index 000000000000..b293ee766dc8 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue.go @@ -0,0 +1,301 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +import ( + "fmt" + "sort" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" +) + +type newLabelValueFunction func(m LabelValueMetadata, value any) LabelValue + +type LabelValueMetadata interface { + ValueMetadata + ValueType() ValueType + NewLabelValue(value any) LabelValue +} + +type LabelValue interface { + Metadata() LabelValueMetadata + Value() any + SetValueTo(attributes pcommon.Map) +} + +type queryLabelValueMetadata struct { + name 
string + columnName string + valueType ValueType + newLabelValueFunc newLabelValueFunction + valueHolderFunc valueHolderFunction +} + +func (m queryLabelValueMetadata) ValueHolder() any { + return m.valueHolderFunc() +} + +func (m queryLabelValueMetadata) NewLabelValue(value any) LabelValue { + return m.newLabelValueFunc(m, value) +} + +func (m queryLabelValueMetadata) ValueType() ValueType { + return m.valueType +} + +type stringLabelValue struct { + metadata LabelValueMetadata + value string +} + +type int64LabelValue struct { + metadata LabelValueMetadata + value int64 +} + +type boolLabelValue struct { + metadata LabelValueMetadata + value bool +} + +type stringSliceLabelValue struct { + metadata LabelValueMetadata + value string +} + +type byteSliceLabelValue struct { + metadata LabelValueMetadata + value string +} + +type lockRequestSliceLabelValue struct { + metadata LabelValueMetadata + value string +} + +func (m queryLabelValueMetadata) Name() string { + return m.name +} + +func (m queryLabelValueMetadata) ColumnName() string { + return m.columnName +} + +func (v stringLabelValue) Metadata() LabelValueMetadata { + return v.metadata +} + +func (v stringLabelValue) Value() any { + return v.value +} + +func (v stringLabelValue) SetValueTo(attributes pcommon.Map) { + attributes.PutStr(v.metadata.Name(), v.value) +} + +func newStringLabelValue(metadata LabelValueMetadata, valueHolder any) LabelValue { + return stringLabelValue{ + metadata: metadata, + value: *valueHolder.(*string), + } +} + +func (v int64LabelValue) Metadata() LabelValueMetadata { + return v.metadata +} + +func (v int64LabelValue) Value() any { + return v.value +} + +func (v int64LabelValue) SetValueTo(attributes pcommon.Map) { + attributes.PutInt(v.metadata.Name(), v.value) +} + +func newInt64LabelValue(metadata LabelValueMetadata, valueHolder any) LabelValue { + return int64LabelValue{ + metadata: metadata, + value: *valueHolder.(*int64), + } +} + +func (v boolLabelValue) Metadata() 
LabelValueMetadata { + return v.metadata +} + +func (v boolLabelValue) Value() any { + return v.value +} + +func (v boolLabelValue) SetValueTo(attributes pcommon.Map) { + attributes.PutBool(v.metadata.Name(), v.value) +} + +func newBoolLabelValue(metadata LabelValueMetadata, valueHolder any) LabelValue { + return boolLabelValue{ + metadata: metadata, + value: *valueHolder.(*bool), + } +} + +func (v stringSliceLabelValue) Metadata() LabelValueMetadata { + return v.metadata +} + +func (v stringSliceLabelValue) Value() any { + return v.value +} + +func (v stringSliceLabelValue) SetValueTo(attributes pcommon.Map) { + attributes.PutStr(v.metadata.Name(), v.value) +} + +func newStringSliceLabelValue(metadata LabelValueMetadata, valueHolder any) LabelValue { + value := *valueHolder.(*[]string) + + sort.Strings(value) + + sortedAndConstructedValue := strings.Join(value, ",") + + return stringSliceLabelValue{ + metadata: metadata, + value: sortedAndConstructedValue, + } +} + +func (v byteSliceLabelValue) Metadata() LabelValueMetadata { + return v.metadata +} + +func (v byteSliceLabelValue) Value() any { + return v.value +} + +func (v byteSliceLabelValue) SetValueTo(attributes pcommon.Map) { + attributes.PutStr(v.metadata.Name(), v.value) +} + +func (v *byteSliceLabelValue) ModifyValue(s string) { + v.value = s +} + +func (v *stringSliceLabelValue) ModifyValue(s string) { + v.value = s +} + +func (v *stringLabelValue) ModifyValue(s string) { + v.value = s +} + +func newByteSliceLabelValue(metadata LabelValueMetadata, valueHolder any) LabelValue { + return byteSliceLabelValue{ + metadata: metadata, + value: string(*valueHolder.(*[]byte)), + } +} + +func (v lockRequestSliceLabelValue) Metadata() LabelValueMetadata { + return v.metadata +} + +func (v lockRequestSliceLabelValue) Value() any { + return v.value +} + +func (v lockRequestSliceLabelValue) SetValueTo(attributes pcommon.Map) { + attributes.PutStr(v.metadata.Name(), v.value) +} + +type lockRequest struct { + LockMode 
string `spanner:"lock_mode"` + Column string `spanner:"column"` + TransactionTag string `spanner:"transaction_tag"` +} + +func newLockRequestSliceLabelValue(metadata LabelValueMetadata, valueHolder any) LabelValue { + value := *valueHolder.(*[]*lockRequest) + // During the specifics of this label we need to take into account only distinct values + uniqueValueItems := make(map[string]struct{}) + var convertedValue []string + + for _, valueItem := range value { + var valueItemString string + if valueItem.TransactionTag == "" { + valueItemString = fmt.Sprintf("{%v,%v}", valueItem.LockMode, valueItem.Column) + } else { + valueItemString = fmt.Sprintf("{%v,%v,%v}", valueItem.LockMode, valueItem.Column, valueItem.TransactionTag) + } + + if _, contains := uniqueValueItems[valueItemString]; !contains { + uniqueValueItems[valueItemString] = struct{}{} + convertedValue = append(convertedValue, valueItemString) + } + } + + sort.Strings(convertedValue) + + constructedValue := strings.Join(convertedValue, ",") + + return lockRequestSliceLabelValue{ + metadata: metadata, + value: constructedValue, + } +} + +func NewLabelValueMetadata(name string, columnName string, valueType ValueType) (LabelValueMetadata, error) { + var newLabelValueFunc newLabelValueFunction + var valueHolderFunc valueHolderFunction + + switch valueType { + case StringValueType: + newLabelValueFunc = newStringLabelValue + valueHolderFunc = func() any { + var valueHolder string + return &valueHolder + } + case IntValueType: + newLabelValueFunc = newInt64LabelValue + valueHolderFunc = func() any { + var valueHolder int64 + return &valueHolder + } + case BoolValueType: + newLabelValueFunc = newBoolLabelValue + valueHolderFunc = func() any { + var valueHolder bool + return &valueHolder + } + case StringSliceValueType: + newLabelValueFunc = newStringSliceLabelValue + valueHolderFunc = func() any { + var valueHolder []string + return &valueHolder + } + case ByteSliceValueType: + newLabelValueFunc = 
newByteSliceLabelValue + valueHolderFunc = func() any { + var valueHolder []byte + return &valueHolder + } + case LockRequestSliceValueType: + newLabelValueFunc = newLockRequestSliceLabelValue + valueHolderFunc = func() any { + var valueHolder []*lockRequest + return &valueHolder + } + case UnknownValueType, FloatValueType, NullFloatValueType: + fallthrough + default: + return nil, fmt.Errorf("invalid value type received for label %q", name) + } + + return queryLabelValueMetadata{ + name: name, + columnName: columnName, + valueType: valueType, + newLabelValueFunc: newLabelValueFunc, + valueHolderFunc: valueHolderFunc, + }, nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue_test.go new file mode 100644 index 000000000000..b0c0795baef5 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue_test.go @@ -0,0 +1,288 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func TestStringLabelValueMetadata(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, StringValueType) + + assert.Equal(t, StringValueType, metadata.ValueType()) + assert.Equal(t, labelName, metadata.Name()) + assert.Equal(t, labelColumnName, metadata.ColumnName()) + + var expectedType *string + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestInt64LabelValueMetadata(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, IntValueType) + + assert.Equal(t, IntValueType, metadata.ValueType()) + assert.Equal(t, labelName, metadata.Name()) + assert.Equal(t, labelColumnName, metadata.ColumnName()) + + var expectedType *int64 + + assert.IsType(t, expectedType, 
metadata.ValueHolder()) +} + +func TestBoolLabelValueMetadata(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, BoolValueType) + + assert.Equal(t, BoolValueType, metadata.ValueType()) + assert.Equal(t, labelName, metadata.Name()) + assert.Equal(t, labelColumnName, metadata.ColumnName()) + + var expectedType *bool + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestStringSliceLabelValueMetadata(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, StringSliceValueType) + + assert.Equal(t, StringSliceValueType, metadata.ValueType()) + assert.Equal(t, labelName, metadata.Name()) + assert.Equal(t, labelColumnName, metadata.ColumnName()) + + var expectedType *[]string + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestByteSliceLabelValueMetadata(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, ByteSliceValueType) + + assert.Equal(t, ByteSliceValueType, metadata.ValueType()) + assert.Equal(t, labelName, metadata.Name()) + assert.Equal(t, labelColumnName, metadata.ColumnName()) + + var expectedType *[]byte + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestLockRequestSliceLabelValueMetadata(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, LockRequestSliceValueType) + + assert.Equal(t, LockRequestSliceValueType, metadata.ValueType()) + assert.Equal(t, labelName, metadata.Name()) + assert.Equal(t, labelColumnName, metadata.ColumnName()) + + var expectedType *[]*lockRequest + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestUnknownLabelValueMetadata(t *testing.T) { + metadata, err := NewLabelValueMetadata(labelName, labelColumnName, UnknownValueType) + + require.Error(t, err) + require.Nil(t, metadata) +} + +func TestStringLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, StringValueType) + labelValue := 
stringLabelValue{ + metadata: metadata, + value: stringValue, + } + + assert.Equal(t, StringValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, stringValue, labelValue.Value()) + + attributes := pcommon.NewMap() + + labelValue.SetValueTo(attributes) + + attributeValue, exists := attributes.Get(labelName) + + assert.True(t, exists) + assert.Equal(t, stringValue, attributeValue.Str()) +} + +func TestInt64LabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, IntValueType) + labelValue := int64LabelValue{ + metadata: metadata, + value: int64Value, + } + + assert.Equal(t, IntValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, int64Value, labelValue.Value()) + + attributes := pcommon.NewMap() + + labelValue.SetValueTo(attributes) + + attributeValue, exists := attributes.Get(labelName) + + assert.True(t, exists) + assert.Equal(t, int64Value, attributeValue.Int()) +} + +func TestBoolLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, BoolValueType) + labelValue := boolLabelValue{ + metadata: metadata, + value: boolValue, + } + + assert.Equal(t, BoolValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, boolValue, labelValue.Value()) + + attributes := pcommon.NewMap() + + labelValue.SetValueTo(attributes) + + attributeValue, exists := attributes.Get(labelName) + + assert.True(t, exists) + assert.Equal(t, boolValue, attributeValue.Bool()) +} + +func TestStringSliceLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, StringSliceValueType) + labelValue := stringSliceLabelValue{ + metadata: metadata, + value: stringValue, + } + + assert.Equal(t, StringSliceValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, stringValue, labelValue.Value()) + + attributes := pcommon.NewMap() + + labelValue.SetValueTo(attributes) + + attributeValue, exists := attributes.Get(labelName) + + assert.True(t, exists) + assert.Equal(t, 
stringValue, attributeValue.Str()) +} + +func TestByteSliceLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, ByteSliceValueType) + labelValue := byteSliceLabelValue{ + metadata: metadata, + value: stringValue, + } + + assert.Equal(t, ByteSliceValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, stringValue, labelValue.Value()) + + attributes := pcommon.NewMap() + + labelValue.SetValueTo(attributes) + + attributeValue, exists := attributes.Get(labelName) + + assert.True(t, exists) + assert.Equal(t, stringValue, attributeValue.Str()) + + labelValue.ModifyValue(labelName) + assert.Equal(t, labelName, labelValue.Value()) +} + +func TestLockRequestSliceLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, LockRequestSliceValueType) + labelValue := lockRequestSliceLabelValue{ + metadata: metadata, + value: stringValue, + } + + assert.Equal(t, LockRequestSliceValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, stringValue, labelValue.Value()) + + attributes := pcommon.NewMap() + + labelValue.SetValueTo(attributes) + + attributeValue, exists := attributes.Get(labelName) + + assert.True(t, exists) + assert.Equal(t, stringValue, attributeValue.Str()) +} + +func TestNewStringLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, StringValueType) + value := stringValue + valueHolder := &value + + labelValue := newStringLabelValue(metadata, valueHolder) + + assert.Equal(t, StringValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, stringValue, labelValue.Value()) +} + +func TestNewInt64LabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, IntValueType) + value := int64Value + valueHolder := &value + + labelValue := newInt64LabelValue(metadata, valueHolder) + + assert.Equal(t, IntValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, int64Value, labelValue.Value()) +} + +func 
TestNewBoolLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, BoolValueType) + value := boolValue + valueHolder := &value + + labelValue := newBoolLabelValue(metadata, valueHolder) + + assert.Equal(t, BoolValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, boolValue, labelValue.Value()) +} + +func TestNewStringSliceLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, StringSliceValueType) + value := []string{"b", "a", "c"} + expectedValue := "a,b,c" + valueHolder := &value + + labelValue := newStringSliceLabelValue(metadata, valueHolder) + + assert.Equal(t, StringSliceValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, expectedValue, labelValue.Value()) +} + +func TestNewByteSliceLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, ByteSliceValueType) + value := []byte(stringValue) + valueHolder := &value + + labelValue := newByteSliceLabelValue(metadata, valueHolder) + + assert.Equal(t, ByteSliceValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, stringValue, labelValue.Value()) +} + +func TestNewLockRequestSliceLabelValue(t *testing.T) { + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, LockRequestSliceValueType) + value := []*lockRequest{ + {LockMode: "lockMode1", Column: "column1", TransactionTag: "tag1"}, + {LockMode: "lockMode2", Column: "column2", TransactionTag: "tag2"}, + {LockMode: "lockMode1", Column: "column1", TransactionTag: "tag1"}, + {LockMode: "lockMode2", Column: "column2", TransactionTag: "tag2"}, + {LockMode: "lockMode3", Column: "column3"}, + } + expectedValue := "{lockMode1,column1,tag1},{lockMode2,column2,tag2},{lockMode3,column3}" + valueHolder := &value + + labelValue := newLockRequestSliceLabelValue(metadata, valueHolder) + + assert.Equal(t, LockRequestSliceValueType, labelValue.Metadata().ValueType()) + assert.Equal(t, expectedValue, labelValue.Value()) +} diff 
--git a/receiver/googlecloudspannerreceiver/internal/metadata/metadata_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metadata_test.go new file mode 100644 index 000000000000..b4c6554d1828 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metadata_test.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) + +const ( + labelName = "LabelName" + labelColumnName = "LabelColumnName" + + stringValue = "stringValue" + int64Value = int64(64) + float64Value = float64(64.64) + defaultNullFloat64Value = float64(0) + boolValue = true + + metricName = "metricName" + metricColumnName = "metricColumnName" + metricDataType = pmetric.MetricTypeGauge + metricUnit = "metricUnit" + metricNamePrefix = "metricNamePrefix-" + + timestampColumnName = "INTERVAL_END" +) diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype.go new file mode 100644 index 000000000000..93e2c0fa010a --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +import "go.opentelemetry.io/collector/pdata/pmetric" + +type MetricType interface { + MetricType() pmetric.MetricType + AggregationTemporality() pmetric.AggregationTemporality + IsMonotonic() bool +} + +type metricValueDataType struct { + dataType pmetric.MetricType + aggregationTemporality pmetric.AggregationTemporality + isMonotonic bool +} + +func NewMetricType(dataType pmetric.MetricType, aggregationTemporality pmetric.AggregationTemporality, + isMonotonic bool) MetricType { + return metricValueDataType{ + 
dataType: dataType, + aggregationTemporality: aggregationTemporality, + isMonotonic: isMonotonic, + } +} + +func (metricValueDataType metricValueDataType) MetricType() pmetric.MetricType { + return metricValueDataType.dataType +} + +func (metricValueDataType metricValueDataType) AggregationTemporality() pmetric.AggregationTemporality { + return metricValueDataType.aggregationTemporality +} + +func (metricValueDataType metricValueDataType) IsMonotonic() bool { + return metricValueDataType.isMonotonic +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go new file mode 100644 index 000000000000..6c6f13aabc7a --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestNewMetricType(t *testing.T) { + metricDataType := NewMetricType(pmetric.MetricTypeGauge, pmetric.AggregationTemporalityDelta, true) + + require.NotNil(t, metricDataType) + assert.Equal(t, pmetric.MetricTypeGauge, metricDataType.MetricType()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, metricDataType.AggregationTemporality()) + assert.True(t, metricDataType.IsMonotonic()) +} + +func TestMetricValueDataType_MetricType(t *testing.T) { + valueDataType := metricValueDataType{dataType: pmetric.MetricTypeGauge} + + assert.Equal(t, pmetric.MetricTypeGauge, valueDataType.MetricType()) +} + +func TestMetricValueDataType_AggregationTemporality(t *testing.T) { + valueDataType := metricValueDataType{aggregationTemporality: pmetric.AggregationTemporalityDelta} + + assert.Equal(t, pmetric.AggregationTemporalityDelta, valueDataType.AggregationTemporality()) +} + 
+func TestMetricValueDataType_IsMonotonic(t *testing.T) { + valueDataType := metricValueDataType{isMonotonic: true} + + assert.True(t, valueDataType.IsMonotonic()) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder.go new file mode 100644 index 000000000000..dbd2750166e2 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder.go @@ -0,0 +1,136 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" +) + +type MetricsBuilder interface { + Build(dataPoints []*MetricsDataPoint) (pmetric.Metrics, error) + Shutdown() error +} + +type metricsFromDataPointBuilder struct { + filterResolver filter.ItemFilterResolver +} + +func NewMetricsFromDataPointBuilder(filterResolver filter.ItemFilterResolver) MetricsBuilder { + return &metricsFromDataPointBuilder{ + filterResolver: filterResolver, + } +} + +func (b *metricsFromDataPointBuilder) Shutdown() error { + return b.filterResolver.Shutdown() +} + +func (b *metricsFromDataPointBuilder) Build(dataPoints []*MetricsDataPoint) (pmetric.Metrics, error) { + var metrics pmetric.Metrics + + groupedDataPoints, err := b.groupAndFilter(dataPoints) + if err != nil { + return pmetric.Metrics{}, err + } + + metrics = pmetric.NewMetrics() + rms := metrics.ResourceMetrics() + rm := rms.AppendEmpty() + + ilms := rm.ScopeMetrics() + ilm := ilms.AppendEmpty() + ilm.Scope().SetName(ScopeName) + + for key, points := range groupedDataPoints { + metric := ilm.Metrics().AppendEmpty() + metric.SetName(key.MetricName) + metric.SetUnit(key.MetricUnit) + + var 
dataPointSlice pmetric.NumberDataPointSlice + switch key.MetricType.MetricType() { + case pmetric.MetricTypeGauge: + dataPointSlice = metric.SetEmptyGauge().DataPoints() + case pmetric.MetricTypeSum: + metric.SetEmptySum().SetAggregationTemporality(key.MetricType.AggregationTemporality()) + metric.Sum().SetIsMonotonic(key.MetricType.IsMonotonic()) + dataPointSlice = metric.Sum().DataPoints() + case pmetric.MetricTypeEmpty, pmetric.MetricTypeHistogram, pmetric.MetricTypeExponentialHistogram, pmetric.MetricTypeSummary: + } + + for _, point := range points { + point.CopyTo(dataPointSlice.AppendEmpty()) + } + } + + return metrics, nil +} + +func (b *metricsFromDataPointBuilder) groupAndFilter(dataPoints []*MetricsDataPoint) (map[MetricsDataPointKey][]*MetricsDataPoint, error) { + if len(dataPoints) == 0 { + return nil, nil + } + + groupedDataPoints := make(map[MetricsDataPointKey][]*MetricsDataPoint) + + for _, dataPoint := range dataPoints { + groupingKey := dataPoint.GroupingKey() + groupedDataPoints[groupingKey] = append(groupedDataPoints[groupingKey], dataPoint) + } + + // Cardinality filtering + for groupingKey, points := range groupedDataPoints { + filteredPoints, err := b.filter(groupingKey.MetricName, points) + if err != nil { + return nil, err + } + + groupedDataPoints[groupingKey] = filteredPoints + } + + return groupedDataPoints, nil +} + +func (b *metricsFromDataPointBuilder) filter(metricName string, dataPoints []*MetricsDataPoint) ([]*MetricsDataPoint, error) { + itemFilter, err := b.filterResolver.Resolve(metricName) + if err != nil { + return nil, err + } + + itemsForFiltering := make([]*filter.Item, len(dataPoints)) + + for i, dataPoint := range dataPoints { + itemsForFiltering[i], err = dataPoint.ToItem() + if err != nil { + return nil, err + } + } + + filteredItems, err := itemFilter.Filter(itemsForFiltering) + if err != nil { + return nil, err + } + + // Creating new slice instead of removing elements from source slice because removing by value is 
not efficient operation. + // Need to use such approach for preserving data points order. + filteredItemsSet := make(map[filter.Item]struct{}) + + for _, filteredItem := range filteredItems { + filteredItemsSet[*filteredItem] = struct{}{} + } + + filteredDataPoints := make([]*MetricsDataPoint, len(filteredItems)) + nextFilteredDataPointIndex := 0 + for i, dataPointItem := range itemsForFiltering { + _, exists := filteredItemsSet[*dataPointItem] + + if exists { + filteredDataPoints[nextFilteredDataPointIndex] = dataPoints[i] + nextFilteredDataPointIndex++ + } + } + + return filteredDataPoints, nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go new file mode 100644 index 000000000000..294e7aa0f57c --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go @@ -0,0 +1,352 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" +) + +const ( + metricName1 = "metricName1" + metricName2 = "metricName2" +) + +type mockItemFilterResolver struct { + mock.Mock +} + +func (r *mockItemFilterResolver) Resolve(string) (filter.ItemFilter, error) { + args := r.Called() + return args.Get(0).(filter.ItemFilter), args.Error(1) + +} + +func (r *mockItemFilterResolver) Shutdown() error { + args := r.Called() + return args.Error(0) +} + +type errorFilter struct { +} + +func (f errorFilter) Filter(_ []*filter.Item) ([]*filter.Item, error) { + return nil, errors.New("error on filter") 
+} + +func (f errorFilter) Shutdown() error { + return nil +} + +func (f errorFilter) TotalLimit() int { + return 0 +} + +func (f errorFilter) LimitByTimestamp() int { + return 0 +} + +type testData struct { + dataPoints []*MetricsDataPoint + expectedGroupingKeys []MetricsDataPointKey + expectedGroups map[MetricsDataPointKey][]*MetricsDataPoint +} + +func TestNewMetricsFromDataPointBuilder(t *testing.T) { + filterResolver := filter.NewNopItemFilterResolver() + + builder := NewMetricsFromDataPointBuilder(filterResolver) + builderCasted := builder.(*metricsFromDataPointBuilder) + defer executeShutdown(t, builderCasted, false) + + assert.Equal(t, filterResolver, builderCasted.filterResolver) +} + +func TestMetricsFromDataPointBuilder_Build(t *testing.T) { + testCases := map[string]struct { + metricsDataType pmetric.MetricType + expectedError error + }{ + "Gauge": {pmetric.MetricTypeGauge, nil}, + "Sum": {pmetric.MetricTypeSum, nil}, + "Gauge with filtering error": {pmetric.MetricTypeGauge, errors.New("filtering error")}, + "Sum with filtering error": {pmetric.MetricTypeSum, errors.New("filtering error")}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + testMetricsFromDataPointBuilderBuild(t, testCase.metricsDataType, testCase.expectedError) + }) + } +} + +func testMetricsFromDataPointBuilderBuild(t *testing.T, metricDataType pmetric.MetricType, expectedError error) { + filterResolver := &mockItemFilterResolver{} + dataForTesting := generateTestData(metricDataType) + builder := &metricsFromDataPointBuilder{filterResolver: filterResolver} + defer executeMockedShutdown(t, builder, filterResolver, expectedError) + expectedGroupingKeysByMetricName := make(map[string]MetricsDataPointKey, len(dataForTesting.expectedGroupingKeys)) + + for _, expectedGroupingKey := range dataForTesting.expectedGroupingKeys { + expectedGroupingKeysByMetricName[expectedGroupingKey.MetricName] = expectedGroupingKey + } + + if expectedError != nil { + 
filterResolver.On("Resolve").Return(errorFilter{}, nil) + } else { + filterResolver.On("Resolve").Return(filter.NewNopItemCardinalityFilter(), nil) + } + + metric, err := builder.Build(dataForTesting.dataPoints) + + filterResolver.AssertExpectations(t) + + if expectedError != nil { + require.Error(t, err) + return + } + require.NoError(t, err) + + assert.Equal(t, len(dataForTesting.dataPoints), metric.DataPointCount()) + assert.Equal(t, len(dataForTesting.expectedGroups), metric.MetricCount()) + assert.Equal(t, 1, metric.ResourceMetrics().At(0).ScopeMetrics().Len()) + assert.Equal(t, len(dataForTesting.expectedGroups), metric.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len()) + require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver", metric.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope().Name()) + + for i := 0; i < len(dataForTesting.expectedGroups); i++ { + ilMetric := metric.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(i) + expectedGroupingKey := expectedGroupingKeysByMetricName[ilMetric.Name()] + expectedDataPoints := dataForTesting.expectedGroups[expectedGroupingKey] + + for dataPointIndex, expectedDataPoint := range expectedDataPoints { + assert.Equal(t, expectedDataPoint.metricName, ilMetric.Name()) + assert.Equal(t, expectedDataPoint.metricValue.Metadata().Unit(), ilMetric.Unit()) + assert.Equal(t, expectedDataPoint.metricValue.Metadata().DataType().MetricType(), ilMetric.Type()) + + var dataPoint pmetric.NumberDataPoint + + if metricDataType == pmetric.MetricTypeGauge { + assert.NotNil(t, ilMetric.Gauge()) + assert.Equal(t, len(expectedDataPoints), ilMetric.Gauge().DataPoints().Len()) + dataPoint = ilMetric.Gauge().DataPoints().At(dataPointIndex) + } else { + assert.NotNil(t, ilMetric.Sum()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, ilMetric.Sum().AggregationTemporality()) + assert.True(t, ilMetric.Sum().IsMonotonic()) + assert.Equal(t, 
len(expectedDataPoints), ilMetric.Sum().DataPoints().Len()) + dataPoint = ilMetric.Sum().DataPoints().At(dataPointIndex) + } + + assertMetricValue(t, expectedDataPoint.metricValue, dataPoint) + + assert.Equal(t, pcommon.NewTimestampFromTime(expectedDataPoint.timestamp), dataPoint.Timestamp()) + // Adding +3 here because we'll always have 3 labels added for each metric: project_id, instance_id, database + assert.Equal(t, 3+len(expectedDataPoint.labelValues), dataPoint.Attributes().Len()) + + attributesMap := dataPoint.Attributes() + + assertDefaultLabels(t, attributesMap, expectedDataPoint.databaseID) + assertNonDefaultLabels(t, attributesMap, expectedDataPoint.labelValues) + } + } +} + +func TestMetricsFromDataPointBuilder_GroupAndFilter(t *testing.T) { + testCases := map[string]struct { + expectedError error + }{ + "Happy path": {nil}, + "With filtering error": {errors.New("filtering error")}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + filterResolver := &mockItemFilterResolver{} + builder := &metricsFromDataPointBuilder{ + filterResolver: filterResolver, + } + defer executeMockedShutdown(t, builder, filterResolver, testCase.expectedError) + dataForTesting := generateTestData(metricDataType) + + if testCase.expectedError != nil { + filterResolver.On("Resolve").Return(errorFilter{}, nil) + } else { + filterResolver.On("Resolve").Return(filter.NewNopItemCardinalityFilter(), testCase.expectedError) + } + + groupedDataPoints, err := builder.groupAndFilter(dataForTesting.dataPoints) + + filterResolver.AssertExpectations(t) + + if testCase.expectedError != nil { + require.Error(t, err) + require.Nil(t, groupedDataPoints) + return + } + require.NoError(t, err) + require.NotNil(t, groupedDataPoints) + + assert.Equal(t, len(dataForTesting.expectedGroups), len(groupedDataPoints)) + + for expectedGroupingKey, expectedGroupPoints := range dataForTesting.expectedGroups { + dataPointsByKey := groupedDataPoints[expectedGroupingKey] + + 
assert.Equal(t, len(expectedGroupPoints), len(dataPointsByKey)) + + for i, point := range expectedGroupPoints { + assert.Equal(t, point, dataPointsByKey[i]) + } + } + }) + } +} + +func TestMetricsFromDataPointBuilder_GroupAndFilter_NilDataPoints(t *testing.T) { + builder := &metricsFromDataPointBuilder{ + filterResolver: filter.NewNopItemFilterResolver(), + } + defer executeShutdown(t, builder, false) + + groupedDataPoints, err := builder.groupAndFilter(nil) + + require.NoError(t, err) + + assert.Empty(t, groupedDataPoints) +} + +func TestMetricsFromDataPointBuilder_Filter(t *testing.T) { + dataForTesting := generateTestData(metricDataType) + testCases := map[string]struct { + expectedError error + }{ + "Happy path": {nil}, + "Error on resolve": {errors.New("error on resolve")}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + filterResolver := &mockItemFilterResolver{} + builder := &metricsFromDataPointBuilder{ + filterResolver: filterResolver, + } + defer executeMockedShutdown(t, builder, filterResolver, testCase.expectedError) + + if testCase.expectedError != nil { + filterResolver.On("Resolve").Return(errorFilter{}, testCase.expectedError) + } else { + filterResolver.On("Resolve").Return(filter.NewNopItemCardinalityFilter(), testCase.expectedError) + } + + filteredDataPoints, err := builder.filter(metricName1, dataForTesting.dataPoints) + + filterResolver.AssertExpectations(t) + + if testCase.expectedError != nil { + require.Error(t, err) + require.Nil(t, filteredDataPoints) + } else { + require.NoError(t, err) + assert.Equal(t, dataForTesting.dataPoints, filteredDataPoints) + } + }) + } +} + +func TestMetricsFromDataPointBuilder_Shutdown(t *testing.T) { + testCases := map[string]struct { + expectedError error + }{ + "Happy path": {nil}, + "Error": {errors.New("shutdown error")}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + filterResolver := &mockItemFilterResolver{} + builder := 
&metricsFromDataPointBuilder{ + filterResolver: filterResolver, + } + + executeMockedShutdown(t, builder, filterResolver, testCase.expectedError) + }) + } +} + +func generateTestData(metricDataType pmetric.MetricType) testData { + timestamp1 := time.Now().UTC() + timestamp2 := timestamp1.Add(time.Minute) + labelValues := allPossibleLabelValues() + metricValues := allPossibleMetricValues(metricDataType) + + dataPoints := []*MetricsDataPoint{ + newMetricDataPoint(metricName1, timestamp1, labelValues, metricValues[0]), + newMetricDataPoint(metricName1, timestamp1, labelValues, metricValues[1]), + newMetricDataPoint(metricName2, timestamp1, labelValues, metricValues[0]), + newMetricDataPoint(metricName2, timestamp1, labelValues, metricValues[1]), + newMetricDataPoint(metricName1, timestamp2, labelValues, metricValues[0]), + newMetricDataPoint(metricName1, timestamp2, labelValues, metricValues[1]), + newMetricDataPoint(metricName2, timestamp2, labelValues, metricValues[0]), + newMetricDataPoint(metricName2, timestamp2, labelValues, metricValues[1]), + } + + expectedGroupingKeys := []MetricsDataPointKey{ + { + MetricName: metricName1, + MetricType: metricValues[0].Metadata().DataType(), + MetricUnit: metricValues[0].Metadata().Unit(), + }, + { + MetricName: metricName2, + MetricType: metricValues[0].Metadata().DataType(), + MetricUnit: metricValues[0].Metadata().Unit(), + }, + } + + expectedGroups := map[MetricsDataPointKey][]*MetricsDataPoint{ + expectedGroupingKeys[0]: { + dataPoints[0], dataPoints[1], dataPoints[4], dataPoints[5], + }, + expectedGroupingKeys[1]: { + dataPoints[2], dataPoints[3], dataPoints[6], dataPoints[7], + }, + } + + return testData{dataPoints: dataPoints, expectedGroupingKeys: expectedGroupingKeys, expectedGroups: expectedGroups} +} + +func newMetricDataPoint(metricName string, timestamp time.Time, labelValues []LabelValue, metricValue MetricValue) *MetricsDataPoint { + return &MetricsDataPoint{ + metricName: metricName, + timestamp: timestamp, + 
databaseID: databaseID(), + labelValues: labelValues, + metricValue: metricValue, + } +} + +func executeShutdown(t *testing.T, metricsBuilder MetricsBuilder, expectError bool) { + err := metricsBuilder.Shutdown() + if expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } +} + +func executeMockedShutdown(t *testing.T, metricsBuilder MetricsBuilder, filterResolver *mockItemFilterResolver, + expectedError error) { + + filterResolver.On("Shutdown").Return(expectedError) + _ = metricsBuilder.Shutdown() + filterResolver.AssertExpectations(t) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go new file mode 100644 index 000000000000..26f76aec92df --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go @@ -0,0 +1,186 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +import ( + "fmt" + "hash/fnv" + "strings" + "time" + "unicode/utf8" + + "github.com/mitchellh/hashstructure" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" +) + +const ( + projectIDLabelName = "project_id" + instanceIDLabelName = "instance_id" + databaseLabelName = "database" +) + +type MetricsDataPointKey struct { + MetricName string + MetricUnit string + MetricType MetricType +} + +type MetricsDataPoint struct { + metricName string + timestamp time.Time + databaseID *datasource.DatabaseID + labelValues []LabelValue + metricValue MetricValue +} + +// 
Fields must be exported for hashing purposes +type dataForHashing struct { + MetricName string + Labels []label +} + +// Fields must be exported for hashing purposes +type label struct { + Name string + Value any +} + +func (mdp *MetricsDataPoint) CopyTo(dataPoint pmetric.NumberDataPoint) { + dataPoint.SetTimestamp(pcommon.NewTimestampFromTime(mdp.timestamp)) + + mdp.metricValue.SetValueTo(dataPoint) + + attributes := dataPoint.Attributes() + attributes.EnsureCapacity(3 + len(mdp.labelValues)) + attributes.PutStr(projectIDLabelName, mdp.databaseID.ProjectID()) + attributes.PutStr(instanceIDLabelName, mdp.databaseID.InstanceID()) + attributes.PutStr(databaseLabelName, mdp.databaseID.DatabaseName()) + for i := range mdp.labelValues { + mdp.labelValues[i].SetValueTo(attributes) + } +} + +func (mdp *MetricsDataPoint) GroupingKey() MetricsDataPointKey { + return MetricsDataPointKey{ + MetricName: mdp.metricName, + MetricUnit: mdp.metricValue.Metadata().Unit(), + MetricType: mdp.metricValue.Metadata().DataType(), + } +} + +func (mdp *MetricsDataPoint) ToItem() (*filter.Item, error) { + seriesKey, err := mdp.hash() + if err != nil { + return nil, err + } + + return &filter.Item{ + SeriesKey: seriesKey, + Timestamp: mdp.timestamp, + }, nil +} + +func (mdp *MetricsDataPoint) toDataForHashing() dataForHashing { + // Do not use map here because it has unpredicted order + // Taking into account 3 default labels: project_id, instance_id, database + labels := make([]label, len(mdp.labelValues)+3) + + labels[0] = label{Name: projectIDLabelName, Value: mdp.databaseID.ProjectID()} + labels[1] = label{Name: instanceIDLabelName, Value: mdp.databaseID.InstanceID()} + labels[2] = label{Name: databaseLabelName, Value: mdp.databaseID.DatabaseName()} + + labelsIndex := 3 + for _, labelValue := range mdp.labelValues { + labels[labelsIndex] = label{Name: labelValue.Metadata().Name(), Value: labelValue.Value()} + labelsIndex++ + } + + return dataForHashing{ + MetricName: mdp.metricName, + 
Labels: labels, + } +} + +// Convert row_range_start_key label of top-lock-stats metric from format "sample(key1, key2)" to "sample(hash1, hash2)" +func parseAndHashRowrangestartkey(key string) string { + builderHashedKey := strings.Builder{} + startIndexKeys := strings.Index(key, "(") + if startIndexKeys == -1 || startIndexKeys == len(key)-1 { // if "(" does not exist or is the last character of the string, then label is of incorrect format + return "" + } + substring := key[startIndexKeys+1 : len(key)-1] + builderHashedKey.WriteString(key[:startIndexKeys+1]) + plusPresent := false + if substring[len(substring)-1] == '+' { + substring = substring[:len(substring)-1] + plusPresent = true + } + keySlice := strings.Split(substring, ",") + hashFunction := fnv.New32a() + for cnt, subKey := range keySlice { + hashFunction.Reset() + hashFunction.Write([]byte(subKey)) + if cnt < len(keySlice)-1 { + builderHashedKey.WriteString(fmt.Sprint(hashFunction.Sum32()) + ",") + } else { + builderHashedKey.WriteString(fmt.Sprint(hashFunction.Sum32())) + } + } + if plusPresent { + builderHashedKey.WriteString("+") + } + builderHashedKey.WriteString(")") + return builderHashedKey.String() +} + +func (mdp *MetricsDataPoint) HideLockStatsRowrangestartkeyPII() { + for index, labelValue := range mdp.labelValues { + if labelValue.Metadata().Name() == "row_range_start_key" { + key := labelValue.Value().(string) + hashedKey := parseAndHashRowrangestartkey(key) + v := mdp.labelValues[index].(byteSliceLabelValue) + p := &v + p.ModifyValue(hashedKey) + mdp.labelValues[index] = v + } + } +} + +func TruncateString(str string, length int) string { + if length <= 0 { + return "" + } + + if utf8.RuneCountInString(str) < length { + return str + } + + return string([]rune(str)[:length]) +} + +func (mdp *MetricsDataPoint) TruncateQueryText(length int) { + for index, labelValue := range mdp.labelValues { + if labelValue.Metadata().Name() == "query_text" { + queryText := labelValue.Value().(string) + 
truncateQueryText := TruncateString(queryText, length) + v := mdp.labelValues[index].(stringLabelValue) + p := &v + p.ModifyValue(truncateQueryText) + mdp.labelValues[index] = v + } + } +} + +func (mdp *MetricsDataPoint) hash() (string, error) { + hashedData, err := hashstructure.Hash(mdp.toDataForHashing(), nil) + if err != nil { + return "", err + } + + return fmt.Sprintf("%x", hashedData), nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go new file mode 100644 index 000000000000..b837b0f6e04a --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go @@ -0,0 +1,296 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "fmt" + "hash/fnv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" +) + +const ( + // Value was generated using the same library. Intent is to detect that something changed in library implementation + // in case we received different value here. For more details inspect tests where this value is used. 
+ expectedHashValue = "29282762c26450b7" +) + +func TestMetricsDataPoint_GroupingKey(t *testing.T) { + dataPoint := metricsDataPointForTests() + + groupingKey := dataPoint.GroupingKey() + + assert.NotNil(t, groupingKey) + assert.Equal(t, dataPoint.metricName, groupingKey.MetricName) + assert.Equal(t, dataPoint.metricValue.Metadata().Unit(), groupingKey.MetricUnit) + assert.Equal(t, dataPoint.metricValue.Metadata().DataType(), groupingKey.MetricType) +} + +func TestMetricsDataPoint_ToItem(t *testing.T) { + dataPoint := metricsDataPointForTests() + + item, err := dataPoint.ToItem() + require.NoError(t, err) + + assert.Equal(t, expectedHashValue, item.SeriesKey) + assert.Equal(t, dataPoint.timestamp, item.Timestamp) +} + +func TestMetricsDataPoint_ToDataForHashing(t *testing.T) { + dataPoint := metricsDataPointForTests() + + actual := dataPoint.toDataForHashing() + + assert.Equal(t, metricName, actual.MetricName) + + assertLabel(t, actual.Labels[0], projectIDLabelName, dataPoint.databaseID.ProjectID()) + assertLabel(t, actual.Labels[1], instanceIDLabelName, dataPoint.databaseID.InstanceID()) + assertLabel(t, actual.Labels[2], databaseLabelName, dataPoint.databaseID.DatabaseName()) + + labelsIndex := 3 + for _, labelValue := range dataPoint.labelValues { + assertLabel(t, actual.Labels[labelsIndex], labelValue.Metadata().Name(), labelValue.Value()) + labelsIndex++ + } +} + +func TestMetricsDataPoint_Hash(t *testing.T) { + dataPoint := metricsDataPointForTests() + + hashValue, err := dataPoint.hash() + require.NoError(t, err) + + assert.Equal(t, expectedHashValue, hashValue) +} + +func TestMetricsDataPoint_CopyTo(t *testing.T) { + timestamp := time.Now().UTC() + labelValues := allPossibleLabelValues() + metricValues := allPossibleMetricValues(metricDataType) + databaseID := databaseID() + + for _, metricValue := range metricValues { + dataPoint := pmetric.NewNumberDataPoint() + metricsDataPoint := &MetricsDataPoint{ + metricName: metricName, + timestamp: timestamp, + 
databaseID: databaseID, + labelValues: labelValues, + metricValue: metricValue, + } + + metricsDataPoint.CopyTo(dataPoint) + + assertMetricValue(t, metricValue, dataPoint) + + assert.Equal(t, pcommon.NewTimestampFromTime(timestamp), dataPoint.Timestamp()) + // Adding +3 here because we'll always have 3 labels added for each metric: project_id, instance_id, database + assert.Equal(t, 3+len(labelValues), dataPoint.Attributes().Len()) + + attributesMap := dataPoint.Attributes() + + assertDefaultLabels(t, attributesMap, databaseID) + assertNonDefaultLabels(t, attributesMap, labelValues) + } +} + +func TestMetricsDataPoint_HideLockStatsRowrangestartkeyPII(t *testing.T) { + btSliceLabelValueMetadata, _ := NewLabelValueMetadata("row_range_start_key", "byteSliceLabelColumnName", StringValueType) + labelValue1 := byteSliceLabelValue{metadata: btSliceLabelValueMetadata, value: "table1.s(23,hello,23+)"} + labelValue2 := byteSliceLabelValue{metadata: btSliceLabelValueMetadata, value: "table2(23,hello)"} + metricValues := allPossibleMetricValues(metricDataType) + labelValues := []LabelValue{labelValue1, labelValue2} + timestamp := time.Now().UTC() + metricsDataPoint := &MetricsDataPoint{ + metricName: metricName, + timestamp: timestamp, + databaseID: databaseID(), + labelValues: labelValues, + metricValue: metricValues[0], + } + hashFunction := fnv.New32a() + hashFunction.Reset() + hashFunction.Write([]byte("23")) + hashOf23 := fmt.Sprint(hashFunction.Sum32()) + hashFunction.Reset() + hashFunction.Write([]byte("hello")) + hashOfHello := fmt.Sprint(hashFunction.Sum32()) + + metricsDataPoint.HideLockStatsRowrangestartkeyPII() + + assert.Len(t, metricsDataPoint.labelValues, 2) + assert.Equal(t, metricsDataPoint.labelValues[0].Value(), "table1.s("+hashOf23+","+hashOfHello+","+hashOf23+"+)") + assert.Equal(t, metricsDataPoint.labelValues[1].Value(), "table2("+hashOf23+","+hashOfHello+")") +} + +func TestMetricsDataPoint_HideLockStatsRowrangestartkeyPIIWithInvalidLabelValue(t 
*testing.T) { + // We are checking that function HideLockStatsRowrangestartkeyPII() does not panic for invalid label values. + btSliceLabelValueMetadata, _ := NewLabelValueMetadata("row_range_start_key", "byteSliceLabelColumnName", StringValueType) + labelValue1 := byteSliceLabelValue{metadata: btSliceLabelValueMetadata, value: ""} + labelValue2 := byteSliceLabelValue{metadata: btSliceLabelValueMetadata, value: "table22(hello"} + labelValue3 := byteSliceLabelValue{metadata: btSliceLabelValueMetadata, value: "table22,hello"} + labelValue4 := byteSliceLabelValue{metadata: btSliceLabelValueMetadata, value: "("} + metricValues := allPossibleMetricValues(metricDataType) + labelValues := []LabelValue{labelValue1, labelValue2, labelValue3, labelValue4} + timestamp := time.Now().UTC() + metricsDataPoint := &MetricsDataPoint{ + metricName: metricName, + timestamp: timestamp, + databaseID: databaseID(), + labelValues: labelValues, + metricValue: metricValues[0], + } + metricsDataPoint.HideLockStatsRowrangestartkeyPII() + assert.Len(t, metricsDataPoint.labelValues, 4) +} + +func TestMetricsDataPoint_TruncateQueryText(t *testing.T) { + strLabelValueMetadata, _ := NewLabelValueMetadata("query_text", "stringLabelColumnName", StringValueType) + labelValue1 := stringLabelValue{metadata: strLabelValueMetadata, value: "SELECT 1"} + metricValues := allPossibleMetricValues(metricDataType) + labelValues := []LabelValue{labelValue1} + timestamp := time.Now().UTC() + metricsDataPoint := &MetricsDataPoint{ + metricName: metricName, + timestamp: timestamp, + databaseID: databaseID(), + labelValues: labelValues, + metricValue: metricValues[0], + } + + metricsDataPoint.TruncateQueryText(6) + + assert.Len(t, metricsDataPoint.labelValues, 1) + assert.Equal(t, "SELECT", metricsDataPoint.labelValues[0].Value()) +} + +func allPossibleLabelValues() []LabelValue { + strLabelValueMetadata, _ := NewLabelValueMetadata("stringLabelName", "stringLabelColumnName", StringValueType) + strLabelValue := 
stringLabelValue{ + metadata: strLabelValueMetadata, + value: stringValue, + } + bLabelValueMetadata, _ := NewLabelValueMetadata("boolLabelName", "boolLabelColumnName", BoolValueType) + bLabelValue := boolLabelValue{ + metadata: bLabelValueMetadata, + value: boolValue, + } + i64LabelValueMetadata, _ := NewLabelValueMetadata("int64LabelName", "int64LabelColumnName", StringValueType) + i64LabelValue := int64LabelValue{ + metadata: i64LabelValueMetadata, + value: int64Value, + } + strSliceLabelValueMetadata, _ := NewLabelValueMetadata("stringSliceLabelName", "stringSliceLabelColumnName", StringValueType) + strSliceLabelValue := stringSliceLabelValue{ + metadata: strSliceLabelValueMetadata, + value: stringValue, + } + btSliceLabelValueMetadata, _ := NewLabelValueMetadata("byteSliceLabelName", "byteSliceLabelColumnName", StringValueType) + btSliceLabelValue := byteSliceLabelValue{ + metadata: btSliceLabelValueMetadata, + value: stringValue, + } + lckReqSliceLabelValueMetadata, _ := NewLabelValueMetadata("lockRequestSliceLabelName", "lockRequestSliceLabelColumnName", LockRequestSliceValueType) + lckReqSliceLabelValue := lockRequestSliceLabelValue{ + metadata: lckReqSliceLabelValueMetadata, + value: stringValue, + } + + return []LabelValue{ + strLabelValue, + bLabelValue, + i64LabelValue, + strSliceLabelValue, + btSliceLabelValue, + lckReqSliceLabelValue, + } +} + +func allPossibleMetricValues(metricDataType pmetric.MetricType) []MetricValue { + dataType := NewMetricType(metricDataType, pmetric.AggregationTemporalityDelta, true) + int64Metadata, _ := NewMetricValueMetadata("int64MetricName", "int64MetricColumnName", dataType, + metricUnit, IntValueType) + float64Metadata, _ := NewMetricValueMetadata("float64MetricName", "float64MetricColumnName", dataType, + metricUnit, FloatValueType) + return []MetricValue{ + int64MetricValue{ + metadata: int64Metadata, + value: int64Value, + }, + float64MetricValue{ + metadata: float64Metadata, + value: float64Value, + }, + } +} + 
+func assertDefaultLabels(t *testing.T, attributesMap pcommon.Map, databaseID *datasource.DatabaseID) { + assertStringLabelValue(t, attributesMap, projectIDLabelName, databaseID.ProjectID()) + assertStringLabelValue(t, attributesMap, instanceIDLabelName, databaseID.InstanceID()) + assertStringLabelValue(t, attributesMap, databaseLabelName, databaseID.DatabaseName()) +} + +func assertNonDefaultLabels(t *testing.T, attributesMap pcommon.Map, labelValues []LabelValue) { + for _, labelValue := range labelValues { + assertLabelValue(t, attributesMap, labelValue) + } +} + +func assertLabelValue(t *testing.T, attributesMap pcommon.Map, labelValue LabelValue) { + value, exists := attributesMap.Get(labelValue.Metadata().Name()) + + assert.True(t, exists) + switch labelValue.(type) { + case stringLabelValue, stringSliceLabelValue, byteSliceLabelValue, lockRequestSliceLabelValue: + assert.Equal(t, labelValue.Value(), value.Str()) + case boolLabelValue: + assert.Equal(t, labelValue.Value(), value.Bool()) + case int64LabelValue: + assert.Equal(t, labelValue.Value(), value.Int()) + default: + assert.Fail(t, "Unknown label value type received") + } +} + +func assertStringLabelValue(t *testing.T, attributesMap pcommon.Map, labelName string, expectedValue any) { + value, exists := attributesMap.Get(labelName) + + assert.True(t, exists) + assert.Equal(t, expectedValue, value.Str()) +} + +func assertMetricValue(t *testing.T, metricValue MetricValue, dataPoint pmetric.NumberDataPoint) { + switch metricValue.(type) { + case int64MetricValue: + assert.Equal(t, metricValue.Value(), dataPoint.IntValue()) + case float64MetricValue: + assert.Equal(t, metricValue.Value(), dataPoint.DoubleValue()) + } +} + +func assertLabel(t *testing.T, lbl label, expectedName string, expectedValue any) { + assert.Equal(t, expectedName, lbl.Name) + assert.Equal(t, expectedValue, lbl.Value) +} + +func metricsDataPointForTests() *MetricsDataPoint { + timestamp := time.Now().UTC() + labelValues := 
allPossibleLabelValues() + databaseID := databaseID() + + return &MetricsDataPoint{ + metricName: metricName, + timestamp: timestamp, + databaseID: databaseID, + labelValues: labelValues, + metricValue: allPossibleMetricValues(metricDataType)[0], + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata.go new file mode 100644 index 000000000000..cb21d8b88b38 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +import ( + "fmt" + "time" + + "cloud.google.com/go/spanner" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" +) + +type MetricsMetadataType int32 + +const ( + MetricsMetadataTypeCurrentStats MetricsMetadataType = iota + MetricsMetadataTypeIntervalStats +) + +type MetricsMetadata struct { + Name string + Query string + MetricNamePrefix string + TimestampColumnName string + HighCardinality bool + // In addition to common metric labels + QueryLabelValuesMetadata []LabelValueMetadata + QueryMetricValuesMetadata []MetricValueMetadata +} + +func (metadata *MetricsMetadata) timestamp(row *spanner.Row) (time.Time, error) { + if metadata.MetadataType() == MetricsMetadataTypeCurrentStats { + return time.Now().UTC(), nil + } + var timestamp time.Time + err := row.ColumnByName(metadata.TimestampColumnName, ×tamp) + return timestamp, err +} + +func (metadata *MetricsMetadata) toLabelValues(row *spanner.Row) ([]LabelValue, error) { + values := make([]LabelValue, len(metadata.QueryLabelValuesMetadata)) + + for i, metadataItems := range metadata.QueryLabelValuesMetadata { + var err error + + if 
values[i], err = toLabelValue(metadataItems, row); err != nil { + return nil, err + } + } + + return values, nil +} + +func toLabelValue(labelValueMetadata LabelValueMetadata, row *spanner.Row) (LabelValue, error) { + valueHolder := labelValueMetadata.ValueHolder() + + err := row.ColumnByName(labelValueMetadata.ColumnName(), valueHolder) + if err != nil { + return nil, err + } + + return labelValueMetadata.NewLabelValue(valueHolder), nil +} + +func (metadata *MetricsMetadata) toMetricValues(row *spanner.Row) ([]MetricValue, error) { + values := make([]MetricValue, len(metadata.QueryMetricValuesMetadata)) + + for i, metadataItems := range metadata.QueryMetricValuesMetadata { + var err error + + if values[i], err = toMetricValue(metadataItems, row); err != nil { + return nil, err + } + } + + return values, nil +} + +func toMetricValue(metricValueMetadata MetricValueMetadata, row *spanner.Row) (MetricValue, error) { + valueHolder := metricValueMetadata.ValueHolder() + + err := row.ColumnByName(metricValueMetadata.ColumnName(), valueHolder) + if err != nil { + return nil, err + } + + return metricValueMetadata.NewMetricValue(valueHolder), nil +} + +func (metadata *MetricsMetadata) RowToMetricsDataPoints(databaseID *datasource.DatabaseID, row *spanner.Row) ([]*MetricsDataPoint, error) { + timestamp, err := metadata.timestamp(row) + if err != nil { + return nil, fmt.Errorf("error occurred during extracting timestamp %w", err) + } + + // Reading labels + labelValues, err := metadata.toLabelValues(row) + if err != nil { + return nil, fmt.Errorf("error occurred during extracting label values for row: %w", err) + } + + // Reading metrics + metricValues, err := metadata.toMetricValues(row) + if err != nil { + return nil, fmt.Errorf("error occurred during extracting metric values row: %w", err) + } + + return metadata.toMetricsDataPoints(databaseID, timestamp, labelValues, metricValues), nil +} + +func (metadata *MetricsMetadata) toMetricsDataPoints(databaseID 
*datasource.DatabaseID, timestamp time.Time, + labelValues []LabelValue, metricValues []MetricValue) []*MetricsDataPoint { + + dataPoints := make([]*MetricsDataPoint, len(metricValues)) + + for i, metricValue := range metricValues { + dataPoint := &MetricsDataPoint{ + metricName: metadata.MetricNamePrefix + metricValue.Metadata().Name(), + timestamp: timestamp, + databaseID: databaseID, + labelValues: labelValues, + metricValue: metricValue, + } + dataPoints[i] = dataPoint + } + + return dataPoints +} + +func (metadata *MetricsMetadata) MetadataType() MetricsMetadataType { + if metadata.TimestampColumnName == "" { + return MetricsMetadataTypeCurrentStats + } + return MetricsMetadataTypeIntervalStats +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata_test.go new file mode 100644 index 000000000000..683edc59ccc3 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata_test.go @@ -0,0 +1,304 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "testing" + "time" + + "cloud.google.com/go/spanner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" +) + +const ( + projectID = "ProjectID" + instanceID = "InstanceID" + databaseName = "DatabaseName" +) + +func databaseID() *datasource.DatabaseID { + return datasource.NewDatabaseID(projectID, instanceID, databaseName) +} + +func TestMetricsMetadata_Timestamp(t *testing.T) { + testCases := map[string]struct { + metadata *MetricsMetadata + rowColumnNames []string + rowColumnValues []any + errorRequired bool + }{ + "Happy path": {&MetricsMetadata{TimestampColumnName: timestampColumnName}, []string{timestampColumnName}, []any{time.Now().UTC()}, false}, 
+ "No timestamp column name": {&MetricsMetadata{}, []string{}, []any{}, false}, + "With error": {&MetricsMetadata{TimestampColumnName: "nonExistingColumn"}, []string{timestampColumnName}, []any{time.Now().UTC()}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + row, _ := spanner.NewRow(testCase.rowColumnNames, testCase.rowColumnValues) + + timestamp, err := testCase.metadata.timestamp(row) + + if testCase.errorRequired { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.NotNil(t, timestamp) + assert.False(t, timestamp.IsZero()) + + if len(testCase.rowColumnValues) == 1 { + assert.Equal(t, testCase.rowColumnValues[0], timestamp) + } + } + }) + } +} + +func TestToLabelValue(t *testing.T) { + rowColumnNames := []string{labelColumnName} + testCases := map[string]struct { + valueType ValueType + expectedType LabelValue + expectedValue any + expectedTransformedValue any + }{ + "String label value metadata": {StringValueType, stringLabelValue{}, stringValue, nil}, + "Int64 label value metadata": {IntValueType, int64LabelValue{}, int64Value, nil}, + "Bool label value metadata": {BoolValueType, boolLabelValue{}, boolValue, nil}, + "String slice label value metadata": {StringSliceValueType, stringSliceLabelValue{}, []string{stringValue, stringValue}, stringValue + "," + stringValue}, + "Byte slice label value metadata": {ByteSliceValueType, byteSliceLabelValue{}, []byte(stringValue), stringValue}, + "Lock request slice label value metadata": {LockRequestSliceValueType, lockRequestSliceLabelValue{}, []*lockRequest{{"lockMode", "column", "transactionTag"}}, "{lockMode,column,transactionTag}"}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + row, _ := spanner.NewRow(rowColumnNames, []any{testCase.expectedValue}) + metadata, _ := NewLabelValueMetadata(labelName, labelColumnName, testCase.valueType) + + labelValue, _ := toLabelValue(metadata, row) + + assert.IsType(t, 
testCase.expectedType, labelValue) + assert.Equal(t, labelName, labelValue.Metadata().Name()) + assert.Equal(t, labelColumnName, labelValue.Metadata().ColumnName()) + if testCase.expectedTransformedValue != nil { + assert.Equal(t, testCase.expectedTransformedValue, labelValue.Value()) + } else { + assert.Equal(t, testCase.expectedValue, labelValue.Value()) + } + }) + } +} + +func TestMetricsMetadata_ToLabelValues_AllPossibleMetadata(t *testing.T) { + stringLabelValueMetadata, _ := NewLabelValueMetadata("stringLabelName", "stringLabelColumnName", StringValueType) + boolLabelValueMetadata, _ := NewLabelValueMetadata("boolLabelName", "boolLabelColumnName", BoolValueType) + int64LabelValueMetadata, _ := NewLabelValueMetadata("int64LabelName", "int64LabelColumnName", IntValueType) + stringSliceLabelValueMetadata, _ := NewLabelValueMetadata("stringSliceLabelName", "stringSliceLabelColumnName", StringSliceValueType) + byteSliceLabelValueMetadata, _ := NewLabelValueMetadata("byteSliceLabelName", "byteSliceLabelColumnName", ByteSliceValueType) + lockRequestSliceLabelValueMetadata, _ := NewLabelValueMetadata("lockRequestSliceLabelName", "lockRequestSliceLabelColumnName", LockRequestSliceValueType) + queryLabelValuesMetadata := []LabelValueMetadata{ + stringLabelValueMetadata, + boolLabelValueMetadata, + int64LabelValueMetadata, + stringSliceLabelValueMetadata, + byteSliceLabelValueMetadata, + lockRequestSliceLabelValueMetadata, + } + metadata := MetricsMetadata{QueryLabelValuesMetadata: queryLabelValuesMetadata} + row, _ := spanner.NewRow( + []string{ + stringLabelValueMetadata.ColumnName(), + boolLabelValueMetadata.ColumnName(), + int64LabelValueMetadata.ColumnName(), + stringSliceLabelValueMetadata.ColumnName(), + byteSliceLabelValueMetadata.ColumnName(), + lockRequestSliceLabelValueMetadata.ColumnName(), + }, + []any{ + stringValue, + boolValue, + int64Value, + []string{stringValue, stringValue}, + []byte(stringValue), + []*lockRequest{{}}, + }) + + labelValues, _ := 
metadata.toLabelValues(row) + + assert.Equal(t, len(queryLabelValuesMetadata), len(labelValues)) + + expectedTypes := []LabelValue{ + stringLabelValue{}, + boolLabelValue{}, + int64LabelValue{}, + stringSliceLabelValue{}, + byteSliceLabelValue{}, + lockRequestSliceLabelValue{}, + } + + for i, expectedType := range expectedTypes { + assert.IsType(t, expectedType, labelValues[i]) + } +} + +func TestMetricsMetadata_ToLabelValues_Error(t *testing.T) { + stringLabelValueMetadata, _ := NewLabelValueMetadata("nonExisting", "nonExistingColumn", StringValueType) + queryLabelValuesMetadata := []LabelValueMetadata{stringLabelValueMetadata} + metadata := MetricsMetadata{QueryLabelValuesMetadata: queryLabelValuesMetadata} + row, _ := spanner.NewRow([]string{}, []any{}) + + labelValues, err := metadata.toLabelValues(row) + + assert.Nil(t, labelValues) + require.Error(t, err) +} + +func TestToMetricValue(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + rowColumnNames := []string{metricColumnName} + testCases := map[string]struct { + valueType ValueType + expectedType MetricValue + expectedValue any + }{ + "Int64 metric value metadata": {IntValueType, int64MetricValue{}, int64Value}, + "Float64 metric value metadata": {FloatValueType, float64MetricValue{}, float64Value}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + row, _ := spanner.NewRow(rowColumnNames, []any{testCase.expectedValue}) + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, testCase.valueType) + + metricValue, _ := toMetricValue(metadata, row) + + assert.IsType(t, testCase.expectedType, metricValue) + assert.Equal(t, metricName, metricValue.Metadata().Name()) + assert.Equal(t, metricColumnName, metricValue.Metadata().ColumnName()) + assert.Equal(t, metricDataType, metricValue.Metadata().DataType()) + assert.Equal(t, metricUnit, metricValue.Metadata().Unit()) + assert.Equal(t, testCase.expectedValue, 
metricValue.Value()) + }) + } +} + +func TestMetricsMetadata_ToMetricValues_AllPossibleMetadata(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + int64MetricValueMetadata, _ := NewMetricValueMetadata("int64MetricName", + "int64MetricColumnName", metricDataType, metricUnit, IntValueType) + float64MetricValueMetadata, _ := NewMetricValueMetadata("float64MetricName", + "float64MetricColumnName", metricDataType, metricUnit, FloatValueType) + queryMetricValuesMetadata := []MetricValueMetadata{ + int64MetricValueMetadata, + float64MetricValueMetadata, + } + metadata := MetricsMetadata{QueryMetricValuesMetadata: queryMetricValuesMetadata} + row, _ := spanner.NewRow( + []string{int64MetricValueMetadata.ColumnName(), float64MetricValueMetadata.ColumnName()}, + []any{int64Value, float64Value}) + + metricValues, _ := metadata.toMetricValues(row) + + assert.Equal(t, len(queryMetricValuesMetadata), len(metricValues)) + + expectedTypes := []MetricValue{int64MetricValue{}, float64MetricValue{}} + + for i, expectedType := range expectedTypes { + assert.IsType(t, expectedType, metricValues[i]) + } +} + +func TestMetricsMetadata_ToMetricValues_Error(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + int64MetricValueMetadata, _ := NewMetricValueMetadata("nonExistingMetricName", + "nonExistingMetricColumnName", metricDataType, metricUnit, IntValueType) + queryMetricValuesMetadata := []MetricValueMetadata{int64MetricValueMetadata} + metadata := MetricsMetadata{QueryMetricValuesMetadata: queryMetricValuesMetadata} + row, _ := spanner.NewRow([]string{}, []any{}) + + metricValues, err := metadata.toMetricValues(row) + + assert.Nil(t, metricValues) + require.Error(t, err) +} + +func TestMetricsMetadata_RowToMetricsDataPoints(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + timestamp := time.Now().UTC() + labelValueMetadata, _ := NewLabelValueMetadata(labelName, labelColumnName, 
StringValueType) + metricValueMetadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, IntValueType) + queryLabelValuesMetadata := []LabelValueMetadata{labelValueMetadata} + queryMetricValuesMetadata := []MetricValueMetadata{metricValueMetadata} + databaseID := databaseID() + metadata := MetricsMetadata{ + MetricNamePrefix: metricNamePrefix, + TimestampColumnName: timestampColumnName, + QueryLabelValuesMetadata: queryLabelValuesMetadata, + QueryMetricValuesMetadata: queryMetricValuesMetadata, + } + testCases := map[string]struct { + rowColumnNames []string + rowColumnValues []any + expectError bool + }{ + "Happy path": {[]string{labelColumnName, metricColumnName, timestampColumnName}, []any{stringValue, int64Value, timestamp}, false}, + "Error on timestamp": {[]string{labelColumnName, metricColumnName}, []any{stringValue, int64Value}, true}, + "Error on label value": {[]string{metricColumnName, timestampColumnName}, []any{int64Value, timestamp}, true}, + "Error on metric value": {[]string{labelColumnName, timestampColumnName}, []any{stringValue, timestamp}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + row, _ := spanner.NewRow(testCase.rowColumnNames, testCase.rowColumnValues) + dataPoints, err := metadata.RowToMetricsDataPoints(databaseID, row) + + if testCase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Len(t, dataPoints, 1) + } + }) + } +} + +func TestMetricsMetadata_MetadataType(t *testing.T) { + testCases := map[string]struct { + timestampColumnName string + expectedMetadataType MetricsMetadataType + }{ + "Current stats metadata": {"", MetricsMetadataTypeCurrentStats}, + "Interval stats metadata": {"timestampColumnName", MetricsMetadataTypeIntervalStats}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metricsMetadata := &MetricsMetadata{TimestampColumnName: testCase.timestampColumnName} + + 
assert.Equal(t, testCase.expectedMetadataType, metricsMetadata.MetadataType()) + }) + } +} + +func TestMetricsMetadata_ToMetricsDataPoints(t *testing.T) { + timestamp := time.Now().UTC() + labelValues := allPossibleLabelValues() + metricValues := allPossibleMetricValues(metricDataType) + databaseID := databaseID() + metadata := MetricsMetadata{MetricNamePrefix: metricNamePrefix} + + dataPoints := metadata.toMetricsDataPoints(databaseID, timestamp, labelValues, metricValues) + + assert.Equal(t, len(metricValues), len(dataPoints)) + + for i, dataPoint := range dataPoints { + assert.Equal(t, metadata.MetricNamePrefix+metricValues[i].Metadata().Name(), dataPoint.metricName) + assert.Equal(t, timestamp, dataPoint.timestamp) + assert.Equal(t, databaseID, dataPoint.databaseID) + assert.Equal(t, labelValues, dataPoint.labelValues) + assert.Equal(t, metricValues[i].Value(), dataPoint.metricValue.Value()) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue.go new file mode 100644 index 000000000000..d5f78b5c1710 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue.go @@ -0,0 +1,183 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +import ( + "fmt" + + "cloud.google.com/go/spanner" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +type newMetricValueFunction func(m MetricValueMetadata, value any) MetricValue + +type MetricValueMetadata interface { + ValueMetadata + ValueType() ValueType + DataType() MetricType + Unit() string + NewMetricValue(value any) MetricValue +} + +type MetricValue interface { + Metadata() MetricValueMetadata + Value() any + SetValueTo(ndp pmetric.NumberDataPoint) +} + +type queryMetricValueMetadata struct { + name string + 
columnName string + dataType MetricType + unit string + valueType ValueType + newMetricValueFunc newMetricValueFunction + valueHolderFunc valueHolderFunction +} + +type int64MetricValue struct { + metadata MetricValueMetadata + value int64 +} + +type float64MetricValue struct { + metadata MetricValueMetadata + value float64 +} + +type nullFloat64MetricValue struct { + metadata MetricValueMetadata + value spanner.NullFloat64 +} + +func (m queryMetricValueMetadata) ValueHolder() any { + return m.valueHolderFunc() +} + +func (m queryMetricValueMetadata) NewMetricValue(value any) MetricValue { + return m.newMetricValueFunc(m, value) +} + +func (m queryMetricValueMetadata) Name() string { + return m.name +} + +func (m queryMetricValueMetadata) ColumnName() string { + return m.columnName +} + +func (m queryMetricValueMetadata) ValueType() ValueType { + return m.valueType +} + +func (m queryMetricValueMetadata) DataType() MetricType { + return m.dataType +} + +func (m queryMetricValueMetadata) Unit() string { + return m.unit +} + +func (v int64MetricValue) Metadata() MetricValueMetadata { + return v.metadata +} + +func (v float64MetricValue) Metadata() MetricValueMetadata { + return v.metadata +} + +func (v nullFloat64MetricValue) Metadata() MetricValueMetadata { + return v.metadata +} + +func (v int64MetricValue) Value() any { + return v.value +} + +func (v float64MetricValue) Value() any { + return v.value +} + +func (v nullFloat64MetricValue) Value() any { + return v.value +} + +func (v int64MetricValue) SetValueTo(point pmetric.NumberDataPoint) { + point.SetIntValue(v.value) +} + +func (v float64MetricValue) SetValueTo(point pmetric.NumberDataPoint) { + point.SetDoubleValue(v.value) +} + +func (v nullFloat64MetricValue) SetValueTo(point pmetric.NumberDataPoint) { + if v.value.Valid { + point.SetDoubleValue(v.value.Float64) + } else { + point.SetDoubleValue(0) + } +} + +func newInt64MetricValue(metadata MetricValueMetadata, valueHolder any) MetricValue { + return 
int64MetricValue{ + metadata: metadata, + value: *valueHolder.(*int64), + } +} + +func newFloat64MetricValue(metadata MetricValueMetadata, valueHolder any) MetricValue { + return float64MetricValue{ + metadata: metadata, + value: *valueHolder.(*float64), + } +} + +func newNullFloat64MetricValue(metadata MetricValueMetadata, valueHolder any) MetricValue { + return nullFloat64MetricValue{ + metadata: metadata, + value: *valueHolder.(*spanner.NullFloat64), + } +} + +func NewMetricValueMetadata(name string, columnName string, dataType MetricType, unit string, + valueType ValueType) (MetricValueMetadata, error) { + + var newMetricValueFunc newMetricValueFunction + var valueHolderFunc valueHolderFunction + + switch valueType { + case IntValueType: + newMetricValueFunc = newInt64MetricValue + valueHolderFunc = func() any { + var valueHolder int64 + return &valueHolder + } + case FloatValueType: + newMetricValueFunc = newFloat64MetricValue + valueHolderFunc = func() any { + var valueHolder float64 + return &valueHolder + } + case NullFloatValueType: + newMetricValueFunc = newNullFloat64MetricValue + valueHolderFunc = func() any { + var valueHolder spanner.NullFloat64 + return &valueHolder + } + case UnknownValueType, StringValueType, BoolValueType, StringSliceValueType, ByteSliceValueType, LockRequestSliceValueType: + fallthrough + default: + return nil, fmt.Errorf("invalid value type received for metric value %q", name) + } + + return queryMetricValueMetadata{ + name: name, + columnName: columnName, + dataType: dataType, + unit: unit, + valueType: valueType, + newMetricValueFunc: newMetricValueFunc, + valueHolderFunc: valueHolderFunc, + }, nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue_test.go new file mode 100644 index 000000000000..a3d686e55bdf --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue_test.go @@ -0,0 +1,164 @@ 
+// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "testing" + + "cloud.google.com/go/spanner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestInt64MetricValueMetadata(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, IntValueType) + + assert.Equal(t, metricName, metadata.Name()) + assert.Equal(t, metricColumnName, metadata.ColumnName()) + assert.Equal(t, metricDataType, metadata.DataType()) + assert.Equal(t, metricUnit, metadata.Unit()) + assert.Equal(t, IntValueType, metadata.ValueType()) + + var expectedType *int64 + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestFloat64MetricValueMetadata(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, FloatValueType) + + assert.Equal(t, metricName, metadata.Name()) + assert.Equal(t, metricColumnName, metadata.ColumnName()) + assert.Equal(t, metricDataType, metadata.DataType()) + assert.Equal(t, metricUnit, metadata.Unit()) + assert.Equal(t, FloatValueType, metadata.ValueType()) + + var expectedType *float64 + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestNullFloat64MetricValueMetadata(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, NullFloatValueType) + + assert.Equal(t, metricName, metadata.Name()) + assert.Equal(t, metricColumnName, metadata.ColumnName()) + assert.Equal(t, metricDataType, metadata.DataType()) + assert.Equal(t, metricUnit, metadata.Unit()) + assert.Equal(t, NullFloatValueType, metadata.ValueType()) + + 
var expectedType *spanner.NullFloat64 + + assert.IsType(t, expectedType, metadata.ValueHolder()) +} + +func TestUnknownMetricValueMetadata(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, err := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, UnknownValueType) + + require.Error(t, err) + require.Nil(t, metadata) +} + +func TestInt64MetricValue(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, IntValueType) + metricValue := int64MetricValue{ + metadata: metadata, + value: int64Value, + } + + assert.Equal(t, int64Value, metricValue.Value()) + assert.Equal(t, IntValueType, metadata.ValueType()) + + dataPoint := pmetric.NewNumberDataPoint() + + metricValue.SetValueTo(dataPoint) + + assert.Equal(t, int64Value, dataPoint.IntValue()) +} + +func TestFloat64MetricValue(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, FloatValueType) + metricValue := float64MetricValue{ + metadata: metadata, + value: float64Value, + } + + assert.Equal(t, float64Value, metricValue.Value()) + assert.Equal(t, FloatValueType, metadata.ValueType()) + + dataPoint := pmetric.NewNumberDataPoint() + + metricValue.SetValueTo(dataPoint) + + assert.Equal(t, float64Value, dataPoint.DoubleValue()) +} + +func TestNullFloat64MetricValue(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, NullFloatValueType) + + validNullFloat := spanner.NullFloat64{Float64: float64Value, Valid: true} + metricValue := nullFloat64MetricValue{ + metadata: metadata, + value: validNullFloat, + } + assert.Equal(t, validNullFloat, metricValue.Value()) + assert.Equal(t, 
NullFloatValueType, metadata.ValueType()) + dataPoint := pmetric.NewNumberDataPoint() + metricValue.SetValueTo(dataPoint) + assert.Equal(t, float64Value, dataPoint.DoubleValue()) + + invalidNullFloat := spanner.NullFloat64{Float64: float64Value, Valid: false} + metricValue = nullFloat64MetricValue{ + metadata: metadata, + value: invalidNullFloat, + } + assert.Equal(t, invalidNullFloat, metricValue.Value()) + assert.Equal(t, NullFloatValueType, metadata.ValueType()) + metricValue.SetValueTo(dataPoint) + assert.Equal(t, defaultNullFloat64Value, dataPoint.DoubleValue()) +} + +func TestNewInt64MetricValue(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, IntValueType) + value := int64Value + valueHolder := &value + + metricValue := newInt64MetricValue(metadata, valueHolder) + + assert.Equal(t, int64Value, metricValue.Value()) + assert.Equal(t, IntValueType, metadata.ValueType()) +} + +func TestNewFloat64MetricValue(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, FloatValueType) + value := float64Value + valueHolder := &value + + metricValue := newFloat64MetricValue(metadata, valueHolder) + + assert.Equal(t, float64Value, metricValue.Value()) + assert.Equal(t, FloatValueType, metadata.ValueType()) +} + +func TestNewNullFloat64MetricValue(t *testing.T) { + metricDataType := metricValueDataType{dataType: metricDataType} + metadata, _ := NewMetricValueMetadata(metricName, metricColumnName, metricDataType, metricUnit, NullFloatValueType) + value := spanner.NullFloat64{Float64: float64Value, Valid: true} + valueHolder := &value + + metricValue := newNullFloat64MetricValue(metadata, valueHolder) + + assert.Equal(t, value, metricValue.Value()) + assert.Equal(t, NullFloatValueType, metadata.ValueType()) +} diff --git 
a/receiver/googlecloudspannerreceiver/internal/metadata/package_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/package_test.go new file mode 100644 index 000000000000..c83588d3062e --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/package_test.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata + +import ( + "testing" + + "go.uber.org/goleak" +) + +// The IgnoreTopFunction call prevents catching the leak generated by opencensus +// defaultWorker.Start which at this time is part of the package's init call. +// See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start")) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/valuemetadata.go b/receiver/googlecloudspannerreceiver/internal/metadata/valuemetadata.go new file mode 100644 index 000000000000..c6b2f174ca52 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadata/valuemetadata.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +type ValueType string + +const ( + UnknownValueType ValueType = "unknown" + StringValueType ValueType = "string" + IntValueType ValueType = "int" + FloatValueType ValueType = "float" + NullFloatValueType ValueType = "null_float" + BoolValueType ValueType = "bool" + StringSliceValueType ValueType = "string_slice" + ByteSliceValueType ValueType = "byte_slice" + LockRequestSliceValueType ValueType = "lock_request_slice" +) + +type ValueMetadata interface { + Name() string + ColumnName() string + ValueHolder() any +} + +type valueHolderFunction func() any diff --git 
a/receiver/googlecloudspannerreceiver/internal/metadataconfig/metadata_yaml_test.go b/receiver/googlecloudspannerreceiver/internal/metadataconfig/metadata_yaml_test.go new file mode 100644 index 000000000000..1335beefeaa3 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataconfig/metadata_yaml_test.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataconfig + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" +) + +// Kind of sanity check test of metrics.yaml used for production usage +func TestParsingMetadataYaml(t *testing.T) { + content, err := os.ReadFile("metrics.yaml") + + require.NoError(t, err) + + metadataMap, err := metadataparser.ParseMetadataConfig(content) + + require.NoError(t, err) + require.NotNil(t, metadataMap) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataconfig/metrics.yaml b/receiver/googlecloudspannerreceiver/internal/metadataconfig/metrics.yaml new file mode 100644 index 000000000000..a5c6ebd0cc55 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataconfig/metrics.yaml @@ -0,0 +1,540 @@ +metadata: +# +# -------------------------------------------- Active Queries Summary -------------------------------------------------- +# + - name: "active queries summary" + query: "SELECT * FROM SPANNER_SYS.ACTIVE_QUERIES_SUMMARY" + metric_name_prefix: "database/spanner/active_queries_summary/" + high_cardinality: false + metrics: + - name: "active_count" + column_name: "ACTIVE_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "count_older_than_1s" + column_name: "COUNT_OLDER_THAN_1S" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "count_older_than_10s" + column_name: "COUNT_OLDER_THAN_10S" + value_type: "int" + data: + type: 
"gauge" + unit: "one" + - name: "count_older_than_100s" + column_name: "COUNT_OLDER_THAN_100S" + value_type: "int" + data: + type: "gauge" + unit: "one" +# +# -------------------------------------------- Locks Stats ------------------------------------------------------------- +# + - name: "top minute lock stats" + query: "SELECT * FROM SPANNER_SYS.LOCK_STATS_TOP_MINUTE WHERE INTERVAL_END = @pullTimestamp ORDER BY INTERVAL_END DESC, LOCK_WAIT_SECONDS DESC" + metric_name_prefix: "database/spanner/lock_stats/top/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: true + labels: + - name: "row_range_start_key" + column_name: "ROW_RANGE_START_KEY" + value_type: "byte_slice" + - name: "sample_lock_requests" + column_name: "SAMPLE_LOCK_REQUESTS" + value_type: "lock_request_slice" + metrics: + - name: "lock_wait_seconds" + column_name: "LOCK_WAIT_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + + - name: "total minute lock stats" + query: "SELECT * FROM SPANNER_SYS.LOCK_STATS_TOTAL_MINUTE WHERE INTERVAL_END = @pullTimestamp" + metric_name_prefix: "database/spanner/lock_stats/total/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: false + metrics: + - name: "total_lock_wait_seconds" + column_name: "TOTAL_LOCK_WAIT_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" +# +# -------------------------------------------- Queries Stats ----------------------------------------------------------- +# + - name: "top minute query stats" + query: "SELECT INTERVAL_END, TEXT, REQUEST_TAG, TEXT_TRUNCATED, TEXT_FINGERPRINT, EXECUTION_COUNT, AVG_LATENCY_SECONDS, AVG_ROWS, AVG_BYTES, AVG_ROWS_SCANNED, AVG_CPU_SECONDS, ALL_FAILED_EXECUTION_COUNT, ALL_FAILED_AVG_LATENCY_SECONDS, CANCELLED_OR_DISCONNECTED_EXECUTION_COUNT, TIMED_OUT_EXECUTION_COUNT, SPANNER_SYS.DISTRIBUTION_PERCENTILE(LATENCY_DISTRIBUTION[OFFSET(0)], 99.0) AS NINETY_NINE_PERCENTILE_LATENCY, SPANNER_SYS.DISTRIBUTION_PERCENTILE(LATENCY_DISTRIBUTION[OFFSET(0)], 
95.0) AS NINETY_FIVE_PERCENTILE_LATENCY, SPANNER_SYS.DISTRIBUTION_PERCENTILE(LATENCY_DISTRIBUTION[OFFSET(0)], 50.0) AS FIFTY_PERCENTILE_LATENCY FROM SPANNER_SYS.QUERY_STATS_TOP_MINUTE WHERE INTERVAL_END = @pullTimestamp ORDER BY INTERVAL_END DESC, EXECUTION_COUNT * AVG_CPU_SECONDS DESC" + metric_name_prefix: "database/spanner/query_stats/top/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: true + labels: + - name: "query_text" + column_name: "TEXT" + value_type: "string" + - name: "request_tag" + column_name: "REQUEST_TAG" + value_type: "string" + - name: "query_text_truncated" + column_name: "TEXT_TRUNCATED" + value_type: "bool" + - name: "query_text_fingerprint" + column_name: "TEXT_FINGERPRINT" + value_type: "int" + metrics: + - name: "execution_count" + column_name: "EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "avg_latency_seconds" + column_name: "AVG_LATENCY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_rows" + column_name: "AVG_ROWS" + value_type: "float" + data: + type: "gauge" + unit: "row" + - name: "avg_bytes" + column_name: "AVG_BYTES" + value_type: "float" + data: + type: "gauge" + unit: "byte" + - name: "avg_rows_scanned" + column_name: "AVG_ROWS_SCANNED" + value_type: "float" + data: + type: "gauge" + unit: "row" + - name: "avg_cpu_seconds" + column_name: "AVG_CPU_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "all_failed_execution_count" + column_name: "ALL_FAILED_EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "all_failed_avg_latency_seconds" + column_name: "ALL_FAILED_AVG_LATENCY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "cancelled_or_disconnected_execution_count" + column_name: "CANCELLED_OR_DISCONNECTED_EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "timed_out_execution_count" + column_name: 
"TIMED_OUT_EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "ninety_nine_percentile_latency" + column_name: "NINETY_NINE_PERCENTILE_LATENCY" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "ninety_five_percentile_latency" + column_name: "NINETY_FIVE_PERCENTILE_LATENCY" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "fifty_percentile_latency" + column_name: "FIFTY_PERCENTILE_LATENCY" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "total minute query stats" + query: "SELECT INTERVAL_END, EXECUTION_COUNT, AVG_LATENCY_SECONDS, AVG_ROWS, AVG_BYTES, AVG_ROWS_SCANNED, AVG_CPU_SECONDS, ALL_FAILED_EXECUTION_COUNT, ALL_FAILED_AVG_LATENCY_SECONDS, CANCELLED_OR_DISCONNECTED_EXECUTION_COUNT, TIMED_OUT_EXECUTION_COUNT, SPANNER_SYS.DISTRIBUTION_PERCENTILE(LATENCY_DISTRIBUTION[OFFSET(0)], 99.0) AS NINETY_NINE_PERCENTILE_LATENCY, SPANNER_SYS.DISTRIBUTION_PERCENTILE(LATENCY_DISTRIBUTION[OFFSET(0)], 95.0) AS NINETY_FIVE_PERCENTILE_LATENCY, SPANNER_SYS.DISTRIBUTION_PERCENTILE(LATENCY_DISTRIBUTION[OFFSET(0)], 50.0) AS FIFTY_PERCENTILE_LATENCY FROM SPANNER_SYS.QUERY_STATS_TOTAL_MINUTE WHERE INTERVAL_END = @pullTimestamp" + metric_name_prefix: "database/spanner/query_stats/total/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: false + metrics: + - name: "execution_count" + column_name: "EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "avg_latency_seconds" + column_name: "AVG_LATENCY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_rows" + column_name: "AVG_ROWS" + value_type: "float" + data: + type: "gauge" + unit: "row" + - name: "avg_bytes" + column_name: "AVG_BYTES" + value_type: "float" + data: + type: "gauge" + unit: "byte" + - name: "avg_rows_scanned" + column_name: "AVG_ROWS_SCANNED" + value_type: "float" + data: + type: "gauge" + unit: "row" + - name: "avg_cpu_seconds" + 
column_name: "AVG_CPU_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "all_failed_execution_count" + column_name: "ALL_FAILED_EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "all_failed_avg_latency_seconds" + column_name: "ALL_FAILED_AVG_LATENCY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "cancelled_or_disconnected_execution_count" + column_name: "CANCELLED_OR_DISCONNECTED_EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "timed_out_execution_count" + column_name: "TIMED_OUT_EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "ninety_nine_percentile_latency" + column_name: "NINETY_NINE_PERCENTILE_LATENCY" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "ninety_five_percentile_latency" + column_name: "NINETY_FIVE_PERCENTILE_LATENCY" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "fifty_percentile_latency" + column_name: "FIFTY_PERCENTILE_LATENCY" + value_type: "float" + data: + type: "gauge" + unit: "second" +# +# -------------------------------------------- Reads Stats ------------------------------------------------------------- +# + - name: "top minute read stats" + query: "SELECT * FROM SPANNER_SYS.READ_STATS_TOP_MINUTE WHERE INTERVAL_END = @pullTimestamp ORDER BY INTERVAL_END DESC, EXECUTION_COUNT * AVG_CPU_SECONDS DESC" + metric_name_prefix: "database/spanner/read_stats/top/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: true + labels: + - name: "read_columns" + column_name: "READ_COLUMNS" + value_type: "string_slice" + - name: "fingerprint" + column_name: "FPRINT" + value_type: "int" + metrics: + - name: "execution_count" + column_name: "EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "avg_rows" + column_name: "AVG_ROWS" + value_type: "float" + data: + type: "gauge" + unit: "row" + - 
name: "avg_bytes" + column_name: "AVG_BYTES" + value_type: "float" + data: + type: "gauge" + unit: "byte" + - name: "avg_cpu_seconds" + column_name: "AVG_CPU_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_locking_delay_seconds" + column_name: "AVG_LOCKING_DELAY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_client_wait_seconds" + column_name: "AVG_CLIENT_WAIT_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_leader_refresh_delay_seconds" + column_name: "AVG_LEADER_REFRESH_DELAY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "total minute read stats" + query: "SELECT * FROM SPANNER_SYS.READ_STATS_TOTAL_MINUTE WHERE INTERVAL_END = @pullTimestamp" + metric_name_prefix: "database/spanner/read_stats/total/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: false + metrics: + - name: "execution_count" + column_name: "EXECUTION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "avg_rows" + column_name: "AVG_ROWS" + value_type: "float" + data: + type: "gauge" + unit: "row" + - name: "avg_bytes" + column_name: "AVG_BYTES" + value_type: "float" + data: + type: "gauge" + unit: "byte" + - name: "avg_cpu_seconds" + column_name: "AVG_CPU_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_locking_delay_seconds" + column_name: "AVG_LOCKING_DELAY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_client_wait_seconds" + column_name: "AVG_CLIENT_WAIT_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_leader_refresh_delay_seconds" + column_name: "AVG_LEADER_REFRESH_DELAY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" +# +# -------------------------------------------- Transactions Stats ------------------------------------------------------ +# + - name: "top minute 
transaction stats" + query: "SELECT INTERVAL_END, FPRINT, TRANSACTION_TAG, READ_COLUMNS, WRITE_CONSTRUCTIVE_COLUMNS, WRITE_DELETE_TABLES, COMMIT_ATTEMPT_COUNT, COMMIT_ABORT_COUNT, COMMIT_RETRY_COUNT, COMMIT_FAILED_PRECONDITION_COUNT, AVG_PARTICIPANTS, AVG_TOTAL_LATENCY_SECONDS, AVG_COMMIT_LATENCY_SECONDS, AVG_BYTES, CASE WHEN AVG_TOTAL_LATENCY_SECONDS IS NOT NULL THEN SPANNER_SYS.DISTRIBUTION_PERCENTILE(TOTAL_LATENCY_DISTRIBUTION[OFFSET(0)], 99.0) ELSE NULL END AS NINETY_NINE_PERCENTILE_TOTAL_LATENCY, CASE WHEN AVG_TOTAL_LATENCY_SECONDS IS NOT NULL THEN SPANNER_SYS.DISTRIBUTION_PERCENTILE(TOTAL_LATENCY_DISTRIBUTION[OFFSET(0)], 95.0) ELSE NULL END AS NINETY_FIVE_PERCENTILE_TOTAL_LATENCY, CASE WHEN AVG_TOTAL_LATENCY_SECONDS IS NOT NULL THEN SPANNER_SYS.DISTRIBUTION_PERCENTILE(TOTAL_LATENCY_DISTRIBUTION[OFFSET(0)], 50.0) ELSE NULL END AS FIFTY_PERCENTILE_TOTAL_LATENCY FROM SPANNER_SYS.TXN_STATS_TOP_MINUTE WHERE INTERVAL_END = @pullTimestamp ORDER BY INTERVAL_END DESC, AVG_COMMIT_LATENCY_SECONDS DESC, COMMIT_ATTEMPT_COUNT DESC, AVG_BYTES DESC" + metric_name_prefix: "database/spanner/txn_stats/top/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: true + labels: + - name: "fingerprint" + column_name: "FPRINT" + value_type: "int" + - name: "transaction_tag" + column_name: "TRANSACTION_TAG" + value_type: "string" + - name: "read_columns" + column_name: "READ_COLUMNS" + value_type: "string_slice" + - name: "write_constructive_columns" + column_name: "WRITE_CONSTRUCTIVE_COLUMNS" + value_type: "string_slice" + - name: "write_delete_tables" + column_name: "WRITE_DELETE_TABLES" + value_type: "string_slice" + metrics: + - name: "commit_attempt_count" + column_name: "COMMIT_ATTEMPT_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "commit_abort_count" + column_name: "COMMIT_ABORT_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "commit_retry_count" + column_name: "COMMIT_RETRY_COUNT" + value_type: "int" + data: + 
type: "gauge" + unit: "one" + - name: "commit_failed_precondition_count" + column_name: "COMMIT_FAILED_PRECONDITION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "avg_participants" + column_name: "AVG_PARTICIPANTS" + value_type: "null_float" + data: + type: "gauge" + unit: "one" + - name: "avg_total_latency_seconds" + column_name: "AVG_TOTAL_LATENCY_SECONDS" + value_type: "null_float" + data: + type: "gauge" + unit: "second" + - name: "avg_commit_latency_seconds" + column_name: "AVG_COMMIT_LATENCY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_bytes" + column_name: "AVG_BYTES" + value_type: "float" + data: + type: "gauge" + unit: "byte" + - name: "ninety_nine_percentile_total_latency" + column_name: "NINETY_NINE_PERCENTILE_TOTAL_LATENCY" + value_type: "null_float" + data: + type: "gauge" + unit: "second" + - name: "ninety_five_percentile_total_latency" + column_name: "NINETY_FIVE_PERCENTILE_TOTAL_LATENCY" + value_type: "null_float" + data: + type: "gauge" + unit: "second" + - name: "fifty_percentile_total_latency" + column_name: "FIFTY_PERCENTILE_TOTAL_LATENCY" + value_type: "null_float" + data: + type: "gauge" + unit: "second" + - name: "total minute transaction stats" + query: "SELECT INTERVAL_END, COMMIT_ATTEMPT_COUNT, COMMIT_ABORT_COUNT, COMMIT_RETRY_COUNT, COMMIT_FAILED_PRECONDITION_COUNT, AVG_PARTICIPANTS, AVG_TOTAL_LATENCY_SECONDS, AVG_COMMIT_LATENCY_SECONDS, AVG_BYTES, CASE WHEN AVG_TOTAL_LATENCY_SECONDS IS NOT NULL THEN SPANNER_SYS.DISTRIBUTION_PERCENTILE(TOTAL_LATENCY_DISTRIBUTION[OFFSET(0)], 99.0) ELSE NULL END AS NINETY_NINE_PERCENTILE_TOTAL_LATENCY, CASE WHEN AVG_TOTAL_LATENCY_SECONDS IS NOT NULL THEN SPANNER_SYS.DISTRIBUTION_PERCENTILE(TOTAL_LATENCY_DISTRIBUTION[OFFSET(0)], 95.0) ELSE NULL END AS NINETY_FIVE_PERCENTILE_TOTAL_LATENCY, CASE WHEN AVG_TOTAL_LATENCY_SECONDS IS NOT NULL THEN SPANNER_SYS.DISTRIBUTION_PERCENTILE(TOTAL_LATENCY_DISTRIBUTION[OFFSET(0)], 50.0) ELSE NULL END AS 
FIFTY_PERCENTILE_TOTAL_LATENCY FROM SPANNER_SYS.TXN_STATS_TOTAL_MINUTE WHERE INTERVAL_END = @pullTimestamp" + metric_name_prefix: "database/spanner/txn_stats/total/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: false + metrics: + - name: "commit_attempt_count" + column_name: "COMMIT_ATTEMPT_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "commit_abort_count" + column_name: "COMMIT_ABORT_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "commit_retry_count" + column_name: "COMMIT_RETRY_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "commit_failed_precondition_count" + column_name: "COMMIT_FAILED_PRECONDITION_COUNT" + value_type: "int" + data: + type: "gauge" + unit: "one" + - name: "avg_participants" + column_name: "AVG_PARTICIPANTS" + value_type: "null_float" + data: + type: "gauge" + unit: "one" + - name: "avg_total_latency_seconds" + column_name: "AVG_TOTAL_LATENCY_SECONDS" + value_type: "null_float" + data: + type: "gauge" + unit: "second" + - name: "avg_commit_latency_seconds" + column_name: "AVG_COMMIT_LATENCY_SECONDS" + value_type: "float" + data: + type: "gauge" + unit: "second" + - name: "avg_bytes" + column_name: "AVG_BYTES" + value_type: "float" + data: + type: "gauge" + unit: "byte" + - name: "ninety_nine_percentile_total_latency" + column_name: "NINETY_NINE_PERCENTILE_TOTAL_LATENCY" + value_type: "null_float" + data: + type: "gauge" + unit: "second" + - name: "ninety_five_percentile_total_latency" + column_name: "NINETY_FIVE_PERCENTILE_TOTAL_LATENCY" + value_type: "null_float" + data: + type: "gauge" + unit: "second" + - name: "fifty_percentile_total_latency" + column_name: "FIFTY_PERCENTILE_TOTAL_LATENCY" + value_type: "null_float" + data: + type: "gauge" + unit: "second" +# +# -------------------------------------------- Table Size Stats ------------------------------------------------------ +# + - name: "hourly table size stats" + query: "SELECT * FROM 
SPANNER_SYS.TABLE_SIZES_STATS_1HOUR WHERE INTERVAL_END = @pullTimestamp ORDER BY INTERVAL_END DESC, USED_BYTES DESC" + metric_name_prefix: "database/spanner/table_sizes/top/" + timestamp_column_name: "INTERVAL_END" + high_cardinality: true + labels: + - name: "table_name" + column_name: "TABLE_NAME" + value_type: "string" + metrics: + - name: "used_bytes" + column_name: "USED_BYTES" + value_type: "float" + data: + type: "gauge" + unit: "byte" diff --git a/receiver/googlecloudspannerreceiver/internal/metadataconfig/package_test.go b/receiver/googlecloudspannerreceiver/internal/metadataconfig/package_test.go new file mode 100644 index 000000000000..9a9fcd408990 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataconfig/package_test.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataconfig + +import ( + "testing" + + "go.uber.org/goleak" +) + +// The IgnoreTopFunction call prevents catching the leak generated by opencensus +// defaultWorker.Start which at this time is part of the package's init call. +// See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. 
+func TestMain(m *testing.M) { + goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start")) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/label.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/label.go new file mode 100644 index 000000000000..8f08c40f70f5 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/label.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type Label struct { + Name string `yaml:"name"` + ColumnName string `yaml:"column_name"` + ValueType metadata.ValueType `yaml:"value_type"` +} + +func (label Label) toLabelValueMetadata() (metadata.LabelValueMetadata, error) { + return metadata.NewLabelValueMetadata(label.Name, label.ColumnName, label.ValueType) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/label_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/label_test.go new file mode 100644 index 000000000000..c8b1d5fffcd9 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/label_test.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + labelName = "labelName" + labelColumnName = "labelColumnName" +) + +func TestLabel_ToLabelValueMetadata(t *testing.T) { + testCases := map[string]struct 
{ + valueType metadata.ValueType + expectError bool + }{ + "Value type is string": {metadata.StringValueType, false}, + "Value type is int": {metadata.IntValueType, false}, + "Value type is bool": {metadata.BoolValueType, false}, + "Value type is string slice": {metadata.StringSliceValueType, false}, + "Value type is byte slice": {metadata.ByteSliceValueType, false}, + "Value type is lock request slice": {metadata.LockRequestSliceValueType, false}, + "Value type is unknown": {metadata.UnknownValueType, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + label := Label{ + Name: labelName, + ColumnName: labelColumnName, + ValueType: testCase.valueType, + } + + valueMetadata, err := label.toLabelValueMetadata() + + if testCase.expectError { + require.Nil(t, valueMetadata) + require.Error(t, err) + } else { + require.NotNil(t, valueMetadata) + require.NoError(t, err) + + assert.Equal(t, label.Name, valueMetadata.Name()) + assert.Equal(t, label.ColumnName, valueMetadata.ColumnName()) + assert.Equal(t, label.ValueType, valueMetadata.ValueType()) + } + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata.go new file mode 100644 index 000000000000..1964e9eb28f8 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" + +import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + +type Metadata struct { + Name string `yaml:"name"` + Query string `yaml:"query"` + MetricNamePrefix string `yaml:"metric_name_prefix"` + TimestampColumnName string 
`yaml:"timestamp_column_name"` + HighCardinality bool `yaml:"high_cardinality"` + Labels []Label `yaml:"labels"` + Metrics []Metric `yaml:"metrics"` +} + +func (m Metadata) MetricsMetadata() (*metadata.MetricsMetadata, error) { + queryLabelValuesMetadata, err := m.toLabelValuesMetadata() + if err != nil { + return nil, err + } + + queryMetricValuesMetadata, err := m.toMetricValuesMetadata() + if err != nil { + return nil, err + } + + return &metadata.MetricsMetadata{ + Name: m.Name, + Query: m.Query, + MetricNamePrefix: m.MetricNamePrefix, + TimestampColumnName: m.TimestampColumnName, + HighCardinality: m.HighCardinality, + QueryLabelValuesMetadata: queryLabelValuesMetadata, + QueryMetricValuesMetadata: queryMetricValuesMetadata, + }, nil +} + +func (m Metadata) toLabelValuesMetadata() ([]metadata.LabelValueMetadata, error) { + valuesMetadata := make([]metadata.LabelValueMetadata, len(m.Labels)) + + for i, label := range m.Labels { + value, err := label.toLabelValueMetadata() + if err != nil { + return nil, err + } + + valuesMetadata[i] = value + } + + return valuesMetadata, nil +} + +func (m Metadata) toMetricValuesMetadata() ([]metadata.MetricValueMetadata, error) { + valuesMetadata := make([]metadata.MetricValueMetadata, len(m.Metrics)) + for i, metric := range m.Metrics { + value, err := metric.toMetricValueMetadata() + if err != nil { + return nil, err + } + + valuesMetadata[i] = value + } + + return valuesMetadata, nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata_test.go new file mode 100644 index 000000000000..3ff0f7ac8fd6 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata_test.go @@ -0,0 +1,145 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +func TestMetadata_ToLabelValuesMetadata(t *testing.T) { + testCases := map[string]struct { + valueType metadata.ValueType + expectError bool + }{ + "Happy path": {metadata.StringValueType, false}, + "With error": {"unknown", true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + label := Label{ + Name: labelName, + ColumnName: labelColumnName, + ValueType: testCase.valueType, + } + md := Metadata{ + Labels: []Label{label}, + } + + valuesMetadata, err := md.toLabelValuesMetadata() + + if testCase.expectError { + require.Nil(t, valuesMetadata) + require.Error(t, err) + } else { + require.NotNil(t, valuesMetadata) + require.NoError(t, err) + + assert.Len(t, valuesMetadata, 1) + } + }) + } +} + +func TestMetadata_ToMetricValuesMetadata(t *testing.T) { + testCases := map[string]struct { + valueType metadata.ValueType + dataType MetricType + expectError bool + }{ + "Happy path": {metadata.IntValueType, MetricType{DataType: GaugeMetricDataType}, false}, + "With error": {metadata.UnknownValueType, MetricType{DataType: GaugeMetricDataType}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metric := Metric{ + Label: Label{ + Name: metricName, + ColumnName: metricColumnName, + ValueType: testCase.valueType, + }, + DataType: testCase.dataType, + } + md := Metadata{ + Metrics: []Metric{metric}, + } + + valuesMetadata, err := md.toMetricValuesMetadata() + + if testCase.expectError { + require.Nil(t, valuesMetadata) + require.Error(t, err) + } else { + require.NotNil(t, valuesMetadata) + require.NoError(t, err) + + assert.Len(t, valuesMetadata, 1) + } + }) + } +} + +func TestMetadata_MetricsMetadata(t *testing.T) { + testCases := map[string]struct { + labelValueType metadata.ValueType + metricValueType metadata.ValueType + 
dataType MetricType + expectError bool + }{ + "Happy path": {metadata.IntValueType, metadata.IntValueType, MetricType{DataType: GaugeMetricDataType}, false}, + "With label error": {metadata.UnknownValueType, metadata.IntValueType, MetricType{DataType: GaugeMetricDataType}, true}, + "With metric error": {metadata.IntValueType, metadata.UnknownValueType, MetricType{DataType: GaugeMetricDataType}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + label := Label{ + Name: labelName, + ColumnName: labelColumnName, + ValueType: testCase.labelValueType, + } + metric := Metric{ + Label: Label{ + Name: metricName, + ColumnName: metricColumnName, + ValueType: testCase.metricValueType, + }, + DataType: testCase.dataType, + } + md := Metadata{ + Name: "name", + Query: "query", + MetricNamePrefix: "metricNamePrefix", + TimestampColumnName: "timestampColumnName", + HighCardinality: true, + Labels: []Label{label}, + Metrics: []Metric{metric}, + } + + metricsMetadata, err := md.MetricsMetadata() + + if testCase.expectError { + require.Nil(t, metricsMetadata) + require.Error(t, err) + } else { + require.NotNil(t, metricsMetadata) + require.NoError(t, err) + + assert.Equal(t, md.Name, metricsMetadata.Name) + assert.Equal(t, md.Query, metricsMetadata.Query) + assert.Equal(t, md.MetricNamePrefix, metricsMetadata.MetricNamePrefix) + assert.Equal(t, md.TimestampColumnName, metricsMetadata.TimestampColumnName) + assert.Equal(t, md.HighCardinality, metricsMetadata.HighCardinality) + assert.Len(t, metricsMetadata.QueryLabelValuesMetadata, 1) + assert.Len(t, metricsMetadata.QueryMetricValuesMetadata, 1) + } + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataconfig.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataconfig.go new file mode 100644 index 000000000000..bf1b72ba0a64 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataconfig.go @@ -0,0 +1,8 
@@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" + +type MetadataConfig struct { + Metadata []Metadata `yaml:"metadata"` +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser.go new file mode 100644 index 000000000000..452425da1443 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" + +import ( + "gopkg.in/yaml.v3" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +func ParseMetadataConfig(metadataContentYaml []byte) ([]*metadata.MetricsMetadata, error) { + var config MetadataConfig + + err := yaml.Unmarshal(metadataContentYaml, &config) + if err != nil { + return nil, err + } + + result := make([]*metadata.MetricsMetadata, len(config.Metadata)) + + for i, parsedMetadata := range config.Metadata { + mData, err := parsedMetadata.MetricsMetadata() + if err != nil { + return nil, err + } + + result[i] = mData + } + + return result, nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go new file mode 100644 index 000000000000..1f0d7e93c944 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
metadataparser + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +func TestParseMetadataConfig(t *testing.T) { + testCases := map[string]struct { + filePath string + expectError bool + }{ + "Valid metadata": {"../../testdata/metadata_valid.yaml", false}, + "YAML parsing error": {"../../testdata/metadata_not_yaml.yaml", true}, + "Invalid metadata": {"../../testdata/metadata_invalid.yaml", true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + content, err := os.ReadFile(testCase.filePath) + + require.NoError(t, err) + + metadataSlice, err := ParseMetadataConfig(content) + + if testCase.expectError { + require.Error(t, err) + require.Nil(t, metadataSlice) + } else { + require.NoError(t, err) + assert.Len(t, metadataSlice, 2) + + mData := metadataSlice[0] + + assert.NotNil(t, mData) + assertMetricsMetadata(t, "current stats", mData) + + mData = metadataSlice[1] + + assert.NotNil(t, mData) + assertMetricsMetadata(t, "interval stats", mData) + } + }) + } +} + +func assertMetricsMetadata(t *testing.T, expectedName string, metricsMetadata *metadata.MetricsMetadata) { + assert.Equal(t, expectedName, metricsMetadata.Name) + assert.Equal(t, "query", metricsMetadata.Query) + assert.Equal(t, "metric_name_prefix", metricsMetadata.MetricNamePrefix) + + assert.Len(t, metricsMetadata.QueryLabelValuesMetadata, 1) + assert.Equal(t, "label_name", metricsMetadata.QueryLabelValuesMetadata[0].Name()) + assert.Equal(t, "LABEL_NAME", metricsMetadata.QueryLabelValuesMetadata[0].ColumnName()) + assert.Equal(t, metadata.StringValueType, metricsMetadata.QueryLabelValuesMetadata[0].ValueType()) + + assert.Len(t, metricsMetadata.QueryMetricValuesMetadata, 1) + assert.Equal(t, "metric_name", 
metricsMetadata.QueryMetricValuesMetadata[0].Name()) + assert.Equal(t, "METRIC_NAME", metricsMetadata.QueryMetricValuesMetadata[0].ColumnName()) + assert.Equal(t, "metric_unit", metricsMetadata.QueryMetricValuesMetadata[0].Unit()) + assert.Equal(t, pmetric.MetricTypeGauge, metricsMetadata.QueryMetricValuesMetadata[0].DataType().MetricType()) + assert.Equal(t, metadata.IntValueType, metricsMetadata.QueryMetricValuesMetadata[0].ValueType()) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metric.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metric.go new file mode 100644 index 000000000000..eec9b5b724b9 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metric.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" + +import ( + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type Metric struct { + Label `yaml:",inline"` + DataType MetricType `yaml:"data"` + Unit string `yaml:"unit"` +} + +func (metric Metric) toMetricValueMetadata() (metadata.MetricValueMetadata, error) { + dataType, err := metric.DataType.toMetricType() + if err != nil { + return nil, fmt.Errorf("invalid value data type received for metric %q", metric.Name) + } + + return metadata.NewMetricValueMetadata(metric.Name, metric.ColumnName, dataType, metric.Unit, metric.ValueType) +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metric_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metric_test.go new file mode 100644 index 000000000000..33271dd33bb1 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metric_test.go @@ -0,0 +1,68 @@ +// Copyright The 
OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + metricName = "labelName" + metricColumnName = "labelColumnName" + metricUnit = "metricUnit" +) + +func TestMetric_ToMetricValueMetadata(t *testing.T) { + testCases := map[string]struct { + valueType metadata.ValueType + dataType MetricType + expectedDataType pmetric.MetricType + expectError bool + }{ + "Value type is int and data type is gauge": {metadata.IntValueType, MetricType{DataType: GaugeMetricDataType}, pmetric.MetricTypeGauge, false}, + "Value type is int and data type is sum": {metadata.IntValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pmetric.MetricTypeSum, false}, + "Value type is int and data type is unknown": {metadata.IntValueType, MetricType{DataType: UnknownMetricDataType}, pmetric.MetricTypeEmpty, true}, + "Value type is float and data type is gauge": {metadata.FloatValueType, MetricType{DataType: GaugeMetricDataType}, pmetric.MetricTypeGauge, false}, + "Value type is float and data type is sum": {metadata.FloatValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pmetric.MetricTypeSum, false}, + "Value type is float and data type is unknown": {metadata.FloatValueType, MetricType{DataType: UnknownMetricDataType}, pmetric.MetricTypeEmpty, true}, + "Value type is unknown and data type is gauge": {metadata.UnknownValueType, MetricType{DataType: GaugeMetricDataType}, pmetric.MetricTypeEmpty, true}, + "Value type is unknown and data type is sum": {metadata.UnknownValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, 
Monotonic: true}, pmetric.MetricTypeEmpty, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metric := Metric{ + Label: Label{ + Name: metricName, + ColumnName: metricColumnName, + ValueType: testCase.valueType, + }, + DataType: testCase.dataType, + Unit: metricUnit, + } + + valueMetadata, err := metric.toMetricValueMetadata() + + if testCase.expectError { + require.Nil(t, valueMetadata) + require.Error(t, err) + } else { + require.NotNil(t, valueMetadata) + require.NoError(t, err) + + assert.Equal(t, metric.Name, valueMetadata.Name()) + assert.Equal(t, metric.ColumnName, valueMetadata.ColumnName()) + assert.Equal(t, metric.Unit, valueMetadata.Unit()) + assert.Equal(t, testCase.expectedDataType, valueMetadata.DataType().MetricType()) + assert.Equal(t, metric.ValueType, valueMetadata.ValueType()) + } + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype.go new file mode 100644 index 000000000000..590bb737ceb8 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" + +import ( + "errors" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type MetricDataType string + +const ( + UnknownMetricDataType MetricDataType = "unknown" + GaugeMetricDataType MetricDataType = "gauge" + SumMetricDataType MetricDataType = "sum" +) + +type AggregationType string + +const ( + UnknownAggregationType AggregationType = "unknown" + DeltaAggregationType AggregationType = "delta" + CumulativeAggregationType 
AggregationType = "cumulative" +) + +type MetricType struct { + DataType MetricDataType `yaml:"type"` + Aggregation AggregationType `yaml:"aggregation"` + Monotonic bool `yaml:"monotonic"` +} + +func (metricType MetricType) dataType() (pmetric.MetricType, error) { + var dataType pmetric.MetricType + + switch metricType.DataType { + case GaugeMetricDataType: + dataType = pmetric.MetricTypeGauge + case SumMetricDataType: + dataType = pmetric.MetricTypeSum + case UnknownMetricDataType: + fallthrough + default: + return pmetric.MetricTypeEmpty, errors.New("invalid data type received") + } + + return dataType, nil +} + +func (metricType MetricType) aggregationTemporality() (pmetric.AggregationTemporality, error) { + var aggregationTemporality pmetric.AggregationTemporality + + switch metricType.Aggregation { + case DeltaAggregationType: + aggregationTemporality = pmetric.AggregationTemporalityDelta + case CumulativeAggregationType: + aggregationTemporality = pmetric.AggregationTemporalityCumulative + case "": + aggregationTemporality = pmetric.AggregationTemporalityUnspecified + case UnknownAggregationType: + fallthrough + default: + return pmetric.AggregationTemporalityUnspecified, errors.New("invalid aggregation temporality received") + } + + return aggregationTemporality, nil +} + +func (metricType MetricType) toMetricType() (metadata.MetricType, error) { + dataType, err := metricType.dataType() + if err != nil { + return nil, err + } + + aggregationTemporality, err := metricType.aggregationTemporality() + if err != nil { + return nil, err + } + + return metadata.NewMetricType(dataType, aggregationTemporality, metricType.Monotonic), nil +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype_test.go new file mode 100644 index 000000000000..8203b2ab767c --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype_test.go @@ -0,0 
+1,111 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestDataType(t *testing.T) { + testCases := map[string]struct { + dataType MetricDataType + expectedDataType pmetric.MetricType + expectError bool + }{ + "Gauge": {GaugeMetricDataType, pmetric.MetricTypeGauge, false}, + "Sum": {SumMetricDataType, pmetric.MetricTypeSum, false}, + "Invalid": {UnknownMetricDataType, pmetric.MetricTypeEmpty, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metricType := MetricType{ + DataType: testCase.dataType, + } + + actualDataType, err := metricType.dataType() + + if testCase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + assert.Equal(t, testCase.expectedDataType, actualDataType) + }) + } +} + +func TestAggregationTemporality(t *testing.T) { + testCases := map[string]struct { + aggregationTemporality AggregationType + expectedAggregationTemporality pmetric.AggregationTemporality + expectError bool + }{ + "Cumulative": {CumulativeAggregationType, pmetric.AggregationTemporalityCumulative, false}, + "Delta": {DeltaAggregationType, pmetric.AggregationTemporalityDelta, false}, + "Empty": {"", pmetric.AggregationTemporalityUnspecified, false}, + "Invalid": {UnknownAggregationType, pmetric.AggregationTemporalityUnspecified, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metricType := MetricType{ + Aggregation: testCase.aggregationTemporality, + } + + actualAggregationTemporality, err := metricType.aggregationTemporality() + + if testCase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + assert.Equal(t, testCase.expectedAggregationTemporality, actualAggregationTemporality) + }) + } +} + +func 
TestToMetricType(t *testing.T) { + testCases := map[string]struct { + dataType MetricDataType + aggregationTemporality AggregationType + expectedDataType pmetric.MetricType + expectedAggregationTemporality pmetric.AggregationTemporality + isMonotonic bool + expectError bool + }{ + "Happy path": {GaugeMetricDataType, CumulativeAggregationType, pmetric.MetricTypeGauge, pmetric.AggregationTemporalityCumulative, true, false}, + "Invalid data type": {"invalid", CumulativeAggregationType, pmetric.MetricTypeEmpty, pmetric.AggregationTemporalityCumulative, true, true}, + "Invalid aggregation": {GaugeMetricDataType, "invalid", pmetric.MetricTypeGauge, pmetric.AggregationTemporalityUnspecified, true, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metricType := MetricType{ + DataType: testCase.dataType, + Aggregation: testCase.aggregationTemporality, + Monotonic: testCase.isMonotonic, + } + + metricDataType, err := metricType.toMetricType() + + if testCase.expectError { + require.Error(t, err) + require.Nil(t, metricDataType) + } else { + require.NoError(t, err) + require.NotNil(t, metricDataType) + assert.Equal(t, testCase.expectedDataType, metricDataType.MetricType()) + assert.Equal(t, testCase.expectedAggregationTemporality, metricDataType.AggregationTemporality()) + assert.Equal(t, testCase.isMonotonic, metricDataType.IsMonotonic()) + } + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/package_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/package_test.go new file mode 100644 index 000000000000..79b7c5d0ed87 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/package_test.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metadataparser + +import ( + "testing" + + "go.uber.org/goleak" +) + +// The IgnoreTopFunction call prevents catching the leak generated by opencensus +// 
defaultWorker.Start which at this time is part of the package's init call. +// See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start")) +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/currentstatsreader.go b/receiver/googlecloudspannerreceiver/internal/statsreader/currentstatsreader.go new file mode 100644 index 000000000000..955c2e53b235 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/currentstatsreader.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" + +import ( + "context" + "errors" + "fmt" + "time" + + "cloud.google.com/go/spanner" + "go.uber.org/zap" + "google.golang.org/api/iterator" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + dataStalenessSeconds = 15 + dataStalenessPeriod = dataStalenessSeconds * time.Second + + // Data reads for backfilling are always performed from stale replica nodes and not from the master. + // For current/fresh data reads main node is used. But in case collector started for example at mm:30+ it is safe + // to read data from stale replica nodes(at this time replica node will contain required data), since we are + // requesting data for mm:00. Also stale reads are faster than reads from main node. 
+ dataStalenessSafeThresholdSeconds = 2 * dataStalenessSeconds +) + +type currentStatsReader struct { + logger *zap.Logger + database *datasource.Database + metricsMetadata *metadata.MetricsMetadata + topMetricsQueryMaxRows int + statement func(args statementArgs) statsStatement +} + +func newCurrentStatsReader( + logger *zap.Logger, + database *datasource.Database, + metricsMetadata *metadata.MetricsMetadata, + config ReaderConfig) *currentStatsReader { + + return ¤tStatsReader{ + logger: logger, + database: database, + metricsMetadata: metricsMetadata, + statement: currentStatsStatement, + topMetricsQueryMaxRows: config.TopMetricsQueryMaxRows, + } +} + +func (reader *currentStatsReader) Name() string { + return fmt.Sprintf("%v %v::%v::%v", reader.metricsMetadata.Name, reader.database.DatabaseID().ProjectID(), + reader.database.DatabaseID().InstanceID(), reader.database.DatabaseID().DatabaseName()) +} + +func (reader *currentStatsReader) Read(ctx context.Context) ([]*metadata.MetricsDataPoint, error) { + reader.logger.Debug("Executing read method", zap.String("reader", reader.Name())) + + stmt := reader.newPullStatement() + + return reader.pull(ctx, stmt) +} + +func (reader *currentStatsReader) newPullStatement() statsStatement { + args := statementArgs{ + query: reader.metricsMetadata.Query, + topMetricsQueryMaxRows: reader.topMetricsQueryMaxRows, + } + + return reader.statement(args) +} + +func (reader *currentStatsReader) pull(ctx context.Context, stmt statsStatement) ([]*metadata.MetricsDataPoint, error) { + transaction := reader.database.Client().Single() + if stmt.stalenessRead || isSafeToUseStaleRead(time.Now().UTC()) { + transaction = transaction.WithTimestampBound(spanner.ExactStaleness(dataStalenessPeriod)) + } + rowsIterator := transaction.Query(ctx, stmt.statement) + defer rowsIterator.Stop() + + var collectedDataPoints []*metadata.MetricsDataPoint + + for { + row, err := rowsIterator.Next() + if err != nil { + if errors.Is(err, iterator.Done) { + 
return collectedDataPoints, nil + } + return nil, fmt.Errorf("query %q failed with error: %w", stmt.statement.SQL, err) + } + + rowMetricsDataPoints, err := reader.metricsMetadata.RowToMetricsDataPoints(reader.database.DatabaseID(), row) + if err != nil { + return nil, fmt.Errorf("query %q failed with error: %w", stmt.statement.SQL, err) + } + + collectedDataPoints = append(collectedDataPoints, rowMetricsDataPoints...) + } +} + +func isSafeToUseStaleRead(readTimestamp time.Time) bool { + return (readTimestamp.Second() - dataStalenessSafeThresholdSeconds) >= 0 +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/currentstatsreader_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/currentstatsreader_test.go new file mode 100644 index 000000000000..e9f213a4b7b7 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/currentstatsreader_test.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "context" + "testing" + "time" + + "cloud.google.com/go/spanner" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + projectID = "ProjectID" + instanceID = "InstanceID" + databaseName = "DatabaseName" + + name = "name" +) + +func TestCurrentStatsReader_Name(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + ctx := context.Background() + client, _ := spanner.NewClient(ctx, "") + database := datasource.NewDatabaseFromClient(client, databaseID) + metricsMetadata := &metadata.MetricsMetadata{ + Name: name, + } + + reader := currentStatsReader{ + database: database, + metricsMetadata: metricsMetadata, + } + + 
assert.Equal(t, reader.metricsMetadata.Name+" "+databaseID.ProjectID()+"::"+ + databaseID.InstanceID()+"::"+databaseID.DatabaseName(), reader.Name()) +} + +func TestNewCurrentStatsReader(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + ctx := context.Background() + client, _ := spanner.NewClient(ctx, "") + database := datasource.NewDatabaseFromClient(client, databaseID) + metricsMetadata := &metadata.MetricsMetadata{ + Name: name, + } + logger := zaptest.NewLogger(t) + config := ReaderConfig{ + TopMetricsQueryMaxRows: topMetricsQueryMaxRows, + } + + reader := newCurrentStatsReader(logger, database, metricsMetadata, config) + + assert.Equal(t, database, reader.database) + assert.Equal(t, logger, reader.logger) + assert.Equal(t, metricsMetadata, reader.metricsMetadata) + assert.Equal(t, topMetricsQueryMaxRows, reader.topMetricsQueryMaxRows) +} + +func TestCurrentStatsReader_NewPullStatement(t *testing.T) { + metricsMetadata := &metadata.MetricsMetadata{ + Query: query, + } + + reader := currentStatsReader{ + metricsMetadata: metricsMetadata, + topMetricsQueryMaxRows: topMetricsQueryMaxRows, + statement: currentStatsStatement, + } + + assert.NotZero(t, reader.newPullStatement()) +} + +func TestIsSafeToUseStaleRead(t *testing.T) { + testCases := map[string]struct { + secondsAfterStartOfMinute int + expectedResult bool + }{ + "Statement with top metrics query max rows": {dataStalenessSeconds, false}, + "Statement without top metrics query max rows": {dataStalenessSeconds*2 + 5, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + readTimestamp := time.Date(2021, 9, 17, 16, 25, testCase.secondsAfterStartOfMinute, 0, time.UTC) + + assert.Equal(t, testCase.expectedResult, isSafeToUseStaleRead(readTimestamp)) + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader.go b/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader.go new 
file mode 100644 index 000000000000..38b37ea28ccf --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" + +import ( + "context" + "fmt" + + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type DatabaseReader struct { + database *datasource.Database + logger *zap.Logger + readers []Reader +} + +func NewDatabaseReader(ctx context.Context, + parsedMetadata []*metadata.MetricsMetadata, + databaseID *datasource.DatabaseID, + serviceAccountPath string, + readerConfig ReaderConfig, + logger *zap.Logger) (*DatabaseReader, error) { + + database, err := datasource.NewDatabase(ctx, databaseID, serviceAccountPath) + if err != nil { + return nil, fmt.Errorf("error occurred during client instantiation for database %q: %w", databaseID.ID(), err) + } + + readers := initializeReaders(logger, parsedMetadata, database, readerConfig) + + return &DatabaseReader{ + database: database, + logger: logger, + readers: readers, + }, nil +} + +func initializeReaders(logger *zap.Logger, parsedMetadata []*metadata.MetricsMetadata, + database *datasource.Database, readerConfig ReaderConfig) []Reader { + readers := make([]Reader, len(parsedMetadata)) + + for i, mData := range parsedMetadata { + switch mData.MetadataType() { + case metadata.MetricsMetadataTypeCurrentStats: + readers[i] = newCurrentStatsReader(logger, database, mData, readerConfig) + case metadata.MetricsMetadataTypeIntervalStats: + readers[i] = newIntervalStatsReader(logger, database, mData, 
readerConfig) + } + } + + return readers +} + +func (databaseReader *DatabaseReader) Name() string { + return databaseReader.database.DatabaseID().ID() +} + +func (databaseReader *DatabaseReader) Shutdown() { + databaseReader.logger.Debug( + "Closing connection to database", + zap.String("database", databaseReader.database.DatabaseID().ID()), + ) + databaseReader.database.Client().Close() +} + +func (databaseReader *DatabaseReader) Read(ctx context.Context) ([]*metadata.MetricsDataPoint, error) { + databaseReader.logger.Debug( + "Executing read method for database", + zap.String("database", databaseReader.database.DatabaseID().ID()), + ) + + var ( + result []*metadata.MetricsDataPoint + err error + ) + + for _, reader := range databaseReader.readers { + dataPoints, readErr := reader.Read(ctx) + result = append(result, dataPoints...) + if readErr != nil { + err = multierr.Append( + err, + fmt.Errorf("cannot read data for data points databaseReader %q because of an error: %w", reader.Name(), readErr), + ) + } + } + if err != nil { + databaseReader.logger.Warn( + "Errors encountered while reading database", + zap.String("database", databaseReader.database.DatabaseID().ID()), + zap.Int("error_count", len(multierr.Errors(err))), + ) + } + + return result, err +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader_test.go new file mode 100644 index 000000000000..b6f714596ca5 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader_test.go @@ -0,0 +1,180 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "context" + "errors" + "testing" + + "cloud.google.com/go/spanner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + + 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type mockReader struct { + mock.Mock +} + +func (r *mockReader) Name() string { + return "mockReader" +} + +func (r *mockReader) Read(ctx context.Context) ([]*metadata.MetricsDataPoint, error) { + args := r.Called(ctx) + return args.Get(0).([]*metadata.MetricsDataPoint), args.Error(1) +} + +func TestNewDatabaseReader(t *testing.T) { + ctx := context.Background() + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + serviceAccountPath := "../../testdata/serviceAccount.json" + readerConfig := ReaderConfig{ + TopMetricsQueryMaxRows: topMetricsQueryMaxRows, + BackfillEnabled: false, + } + logger := zaptest.NewLogger(t) + var parsedMetadata []*metadata.MetricsMetadata + + reader, err := NewDatabaseReader(ctx, parsedMetadata, databaseID, serviceAccountPath, readerConfig, logger) + + assert.NoError(t, err) + + defer executeShutdown(reader) + + assert.Equal(t, databaseID, reader.database.DatabaseID()) + assert.Equal(t, logger, reader.logger) + assert.Empty(t, reader.readers) +} + +func TestNewDatabaseReaderWithError(t *testing.T) { + ctx := context.Background() + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + serviceAccountPath := "does not exist" + readerConfig := ReaderConfig{ + TopMetricsQueryMaxRows: topMetricsQueryMaxRows, + BackfillEnabled: false, + } + logger := zaptest.NewLogger(t) + var parsedMetadata []*metadata.MetricsMetadata + + reader, err := NewDatabaseReader(ctx, parsedMetadata, databaseID, serviceAccountPath, readerConfig, logger) + + assert.Error(t, err) + // Do not call executeShutdown() here because reader hasn't been created + assert.Nil(t, reader) +} + +func TestInitializeReaders(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, 
instanceID, databaseName) + logger := zaptest.NewLogger(t) + var client *spanner.Client + database := datasource.NewDatabaseFromClient(client, databaseID) + currentStatsMetadata := createMetricsMetadata(query) + intervalStatsMetadata := createMetricsMetadata(query) + + currentStatsMetadata.Name = "Current" + currentStatsMetadata.TimestampColumnName = "" + intervalStatsMetadata.Name = "Interval" + + parsedMetadata := []*metadata.MetricsMetadata{ + currentStatsMetadata, + intervalStatsMetadata, + } + readerConfig := ReaderConfig{ + TopMetricsQueryMaxRows: topMetricsQueryMaxRows, + BackfillEnabled: false, + } + + readers := initializeReaders(logger, parsedMetadata, database, readerConfig) + + assert.Len(t, readers, 2) + assert.IsType(t, ¤tStatsReader{}, readers[0]) + assert.IsType(t, &intervalStatsReader{}, readers[1]) +} + +func TestDatabaseReader_Name(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + ctx := context.Background() + client, _ := spanner.NewClient(ctx, databaseName) + database := datasource.NewDatabaseFromClient(client, databaseID) + logger := zaptest.NewLogger(t) + + reader := &DatabaseReader{ + logger: logger, + database: database, + } + defer executeShutdown(reader) + + assert.Equal(t, database.DatabaseID().ID(), reader.Name()) +} + +func TestDatabaseReader_Shutdown(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + ctx := context.Background() + client, _ := spanner.NewClient(ctx, databaseName) + database := datasource.NewDatabaseFromClient(client, databaseID) + logger := zaptest.NewLogger(t) + + reader := &DatabaseReader{ + logger: logger, + database: database, + } + + executeShutdown(reader) +} + +func TestDatabaseReader_Read(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + ctx := context.Background() + client, _ := spanner.NewClient(ctx, databaseName) + database := datasource.NewDatabaseFromClient(client, 
databaseID) + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + expectedError error + }{ + "Read with no error": {nil}, + "Read with error": {errors.New("read error")}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + mr := &mockReader{} + readers := []Reader{mr} + reader := &DatabaseReader{ + logger: logger, + database: database, + readers: readers, + } + defer executeShutdown(reader) + + mr.On("Read", ctx).Return([]*metadata.MetricsDataPoint{}, testCase.expectedError) + + _, err := reader.Read(ctx) + + mr.AssertExpectations(t) + + if testCase.expectedError != nil { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func executeShutdown(reader *DatabaseReader) { + // Doing this because can't control instantiation of Spanner DB client. + // In Shutdown() invocation client with wrong DB parameters produces panic when calling its Close() method. + defer func() { + _ = recover() + }() + + reader.Shutdown() +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/intervalstatsreader.go b/receiver/googlecloudspannerreceiver/internal/statsreader/intervalstatsreader.go new file mode 100644 index 000000000000..485edbee0d47 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/intervalstatsreader.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" + +import ( + "context" + "time" + + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + // Max duration of data backfilling(if enabled). 
Different backends may or may not support such an option.
reader.isBackfillExecution() { + stmt.stalenessRead = false + } + dataPoints, err := reader.pull(ctx, stmt) + if err != nil { + return nil, err + } + metricMetadata := reader.currentStatsReader.metricsMetadata + if reader.hideTopnLockstatsRowrangestartkey && metricMetadata != nil && metricMetadata.Name == topLockStatsMetricName { + for _, dataPoint := range dataPoints { + dataPoint.HideLockStatsRowrangestartkeyPII() + } + } + if reader.truncateText && metricMetadata != nil && metricMetadata.Name == topQueryStatsMetricName { + for _, dataPoint := range dataPoints { + dataPoint.TruncateQueryText(maxLengthTruncateText) + } + } + + collectedDataPoints = append(collectedDataPoints, dataPoints...) + } + + reader.lastPullTimestamp = pullTimestamps[timestampsAmount-1] + + return collectedDataPoints, nil +} + +func (reader *intervalStatsReader) newPullStatement(pullTimestamp time.Time) statsStatement { + args := statementArgs{ + query: reader.metricsMetadata.Query, + topMetricsQueryMaxRows: reader.topMetricsQueryMaxRows, + pullTimestamp: pullTimestamp, + stalenessRead: reader.isBackfillExecution(), + } + + return reader.statement(args) +} + +func (reader *intervalStatsReader) isBackfillExecution() bool { + return reader.timestampsGenerator.isBackfillExecution(reader.lastPullTimestamp) +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/intervalstatsreader_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/intervalstatsreader_test.go new file mode 100644 index 000000000000..10b7112cf0e2 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/intervalstatsreader_test.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "context" + "testing" + "time" + + "cloud.google.com/go/spanner" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" + + 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +func TestIntervalStatsReader_Name(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + ctx := context.Background() + client, _ := spanner.NewClient(ctx, "") + database := datasource.NewDatabaseFromClient(client, databaseID) + metricsMetadata := &metadata.MetricsMetadata{ + Name: name, + } + + reader := intervalStatsReader{ + currentStatsReader: currentStatsReader{ + database: database, + metricsMetadata: metricsMetadata, + }, + } + + assert.Equal(t, reader.metricsMetadata.Name+" "+databaseID.ProjectID()+"::"+ + databaseID.InstanceID()+"::"+databaseID.DatabaseName(), reader.Name()) +} + +func TestNewIntervalStatsReader(t *testing.T) { + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + ctx := context.Background() + client, _ := spanner.NewClient(ctx, "") + database := datasource.NewDatabaseFromClient(client, databaseID) + metricsMetadata := &metadata.MetricsMetadata{ + Name: name, + } + logger := zaptest.NewLogger(t) + config := ReaderConfig{ + TopMetricsQueryMaxRows: topMetricsQueryMaxRows, + BackfillEnabled: true, + HideTopnLockstatsRowrangestartkey: true, + TruncateText: true, + } + + reader := newIntervalStatsReader(logger, database, metricsMetadata, config) + + assert.Equal(t, database, reader.database) + assert.Equal(t, logger, reader.logger) + assert.Equal(t, metricsMetadata, reader.metricsMetadata) + assert.Equal(t, topMetricsQueryMaxRows, reader.topMetricsQueryMaxRows) + assert.NotNil(t, reader.timestampsGenerator) + assert.True(t, reader.timestampsGenerator.backfillEnabled) + assert.True(t, reader.hideTopnLockstatsRowrangestartkey) + assert.True(t, reader.truncateText) +} + +func TestIntervalStatsReader_NewPullStatement(t *testing.T) { + 
databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + timestamp := time.Now().UTC() + logger := zaptest.NewLogger(t) + config := ReaderConfig{ + TopMetricsQueryMaxRows: topMetricsQueryMaxRows, + BackfillEnabled: false, + HideTopnLockstatsRowrangestartkey: true, + TruncateText: true, + } + ctx := context.Background() + client, _ := spanner.NewClient(ctx, "") + database := datasource.NewDatabaseFromClient(client, databaseID) + metricsMetadata := &metadata.MetricsMetadata{ + Query: query, + } + reader := newIntervalStatsReader(logger, database, metricsMetadata, config) + + assert.NotZero(t, reader.newPullStatement(timestamp)) +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/package_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/package_test.go new file mode 100644 index 000000000000..fbb8d96cfa7b --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/package_test.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "testing" + + "go.uber.org/goleak" +) + +// See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information on ignore. 
+func TestMain(m *testing.M) { + goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start")) +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/projectreader.go b/receiver/googlecloudspannerreceiver/internal/statsreader/projectreader.go new file mode 100644 index 000000000000..7a12980ffd4c --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/projectreader.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" + +import ( + "context" + "strings" + + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type ProjectReader struct { + databaseReaders []CompositeReader + logger *zap.Logger +} + +func NewProjectReader(databaseReaders []CompositeReader, logger *zap.Logger) *ProjectReader { + return &ProjectReader{ + databaseReaders: databaseReaders, + logger: logger, + } +} + +func (projectReader *ProjectReader) Shutdown() { + for _, databaseReader := range projectReader.databaseReaders { + projectReader.logger.Info("Shutting down projectReader for database", + zap.String("database", databaseReader.Name())) + databaseReader.Shutdown() + } +} + +func (projectReader *ProjectReader) Read(ctx context.Context) ([]*metadata.MetricsDataPoint, error) { + var ( + result []*metadata.MetricsDataPoint + err error + ) + + for _, databaseReader := range projectReader.databaseReaders { + dataPoints, readErr := databaseReader.Read(ctx) + if readErr == nil { + result = append(result, dataPoints...) 
+ } else { + err = multierr.Append(err, readErr) + } + } + + return result, err +} + +func (projectReader *ProjectReader) Name() string { + databaseReaderNames := make([]string, len(projectReader.databaseReaders)) + + for i, databaseReader := range projectReader.databaseReaders { + databaseReaderNames[i] = databaseReader.Name() + } + + return "Project reader for: " + strings.Join(databaseReaderNames, ",") +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/projectreader_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/projectreader_test.go new file mode 100644 index 000000000000..bb9c06bfbeb2 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/projectreader_test.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type mockCompositeReader struct { + mock.Mock +} + +func (r *mockCompositeReader) Name() string { + return "mockCompositeReader" +} + +func (r *mockCompositeReader) Read(ctx context.Context) ([]*metadata.MetricsDataPoint, error) { + args := r.Called(ctx) + return args.Get(0).([]*metadata.MetricsDataPoint), args.Error(1) +} + +func (r *mockCompositeReader) Shutdown() { + // Do nothing +} + +func TestNewProjectReader(t *testing.T) { + logger := zaptest.NewLogger(t) + var databaseReaders []CompositeReader + + reader := NewProjectReader(databaseReaders, logger) + defer reader.Shutdown() + + assert.NotNil(t, reader) + assert.Equal(t, logger, reader.logger) + assert.Equal(t, databaseReaders, reader.databaseReaders) +} + +func TestProjectReader_Shutdown(t *testing.T) { + logger := 
zaptest.NewLogger(t) + + databaseReaders := []CompositeReader{&mockCompositeReader{}} + + reader := ProjectReader{ + databaseReaders: databaseReaders, + logger: logger, + } + + reader.Shutdown() +} + +func TestProjectReader_Read(t *testing.T) { + ctx := context.Background() + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + expectedError error + }{ + "Happy path": {nil}, + "Error occurred": {errors.New("read error")}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + compositeReader := &mockCompositeReader{} + databaseReaders := []CompositeReader{compositeReader} + reader := ProjectReader{ + databaseReaders: databaseReaders, + logger: logger, + } + defer reader.Shutdown() + + compositeReader.On("Read", ctx).Return([]*metadata.MetricsDataPoint{}, testCase.expectedError) + + _, err := reader.Read(ctx) + + compositeReader.AssertExpectations(t) + + if testCase.expectedError != nil { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestProjectReader_Name(t *testing.T) { + logger := zaptest.NewLogger(t) + + databaseReader := &mockCompositeReader{} + databaseReaders := []CompositeReader{databaseReader} + + reader := ProjectReader{ + databaseReaders: databaseReaders, + logger: logger, + } + defer reader.Shutdown() + + name := reader.Name() + + assert.Equal(t, "Project reader for: "+databaseReader.Name(), name) +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/reader.go b/receiver/googlecloudspannerreceiver/internal/statsreader/reader.go new file mode 100644 index 000000000000..5e05645ca81c --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/reader.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" + +import ( + "context" + + 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +type ReaderConfig struct { + TopMetricsQueryMaxRows int + BackfillEnabled bool + HideTopnLockstatsRowrangestartkey bool + TruncateText bool +} + +type Reader interface { + Name() string + Read(ctx context.Context) ([]*metadata.MetricsDataPoint, error) +} + +// CompositeReader - this interface is used for the composition of multiple Reader(s). +// Main difference between it and Reader - this interface also has Shutdown method for performing some additional +// cleanup necessary for each Reader instance. +type CompositeReader interface { + Reader + // Shutdown Use this method to perform any additional cleanup of underlying components. + Shutdown() +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/statements.go b/receiver/googlecloudspannerreceiver/internal/statsreader/statements.go new file mode 100644 index 000000000000..651b775f94ff --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/statements.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" + +import ( + "time" + + "cloud.google.com/go/spanner" +) + +const ( + topMetricsQueryLimitParameterName = "topMetricsQueryMaxRows" + topMetricsQueryLimitCondition = " LIMIT @" + topMetricsQueryLimitParameterName + + pullTimestampParameterName = "pullTimestamp" +) + +type statementArgs struct { + query string + topMetricsQueryMaxRows int + pullTimestamp time.Time + stalenessRead bool +} + +type statsStatement struct { + statement spanner.Statement + stalenessRead bool +} + +func currentStatsStatement(args statementArgs) statsStatement { + stmt := spanner.Statement{SQL: args.query, Params: map[string]any{}} + + if args.topMetricsQueryMaxRows > 0 { + 
stmt = spanner.Statement{ + SQL: args.query + topMetricsQueryLimitCondition, + Params: map[string]any{ + topMetricsQueryLimitParameterName: args.topMetricsQueryMaxRows, + }, + } + } + + return statsStatement{ + statement: stmt, + stalenessRead: args.stalenessRead, + } +} + +func intervalStatsStatement(args statementArgs) statsStatement { + stmt := currentStatsStatement(args) + + if len(stmt.statement.Params) == 0 { + stmt.statement.Params = map[string]any{} + } + + stmt.statement.Params[pullTimestampParameterName] = args.pullTimestamp + + return stmt +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/statements_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/statements_test.go new file mode 100644 index 000000000000..8ba1d982048b --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/statements_test.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + query = "query" + + topMetricsQueryMaxRows = 10 +) + +func TestCurrentStatsStatement(t *testing.T) { + testCases := map[string]struct { + topMetricsQueryMaxRows int + expectedSQL string + expectedParams map[string]any + }{ + "Statement with top metrics query max rows": {topMetricsQueryMaxRows, query + topMetricsQueryLimitCondition, map[string]any{topMetricsQueryLimitParameterName: topMetricsQueryMaxRows}}, + "Statement without top metrics query max rows": {0, query, map[string]any{}}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + args := statementArgs{ + query: query, + topMetricsQueryMaxRows: testCase.topMetricsQueryMaxRows, + stalenessRead: true, + } + + stmt := currentStatsStatement(args) + + assert.Equal(t, testCase.expectedSQL, stmt.statement.SQL) + assert.Equal(t, testCase.expectedParams, stmt.statement.Params) + assert.True(t, stmt.stalenessRead) 
+ }) + } +} + +func TestIntervalStatsStatement(t *testing.T) { + pullTimestamp := time.Now().UTC() + + testCases := map[string]struct { + topMetricsQueryMaxRows int + expectedSQL string + expectedParams map[string]any + }{ + "Statement with top metrics query max rows": {topMetricsQueryMaxRows, query + topMetricsQueryLimitCondition, map[string]any{ + topMetricsQueryLimitParameterName: topMetricsQueryMaxRows, + pullTimestampParameterName: pullTimestamp, + }}, + "Statement without top metrics query max rows": {0, query, map[string]any{pullTimestampParameterName: pullTimestamp}}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + args := statementArgs{ + query: query, + topMetricsQueryMaxRows: testCase.topMetricsQueryMaxRows, + pullTimestamp: pullTimestamp, + stalenessRead: true, + } + + stmt := intervalStatsStatement(args) + + assert.Equal(t, testCase.expectedSQL, stmt.statement.SQL) + assert.Equal(t, testCase.expectedParams, stmt.statement.Params) + assert.True(t, stmt.stalenessRead) + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go new file mode 100644 index 000000000000..72641950f094 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "context" + "testing" + "time" + + "cloud.google.com/go/spanner" + database "cloud.google.com/go/spanner/admin/database/apiv1" + "cloud.google.com/go/spanner/admin/database/apiv1/databasepb" + "cloud.google.com/go/spanner/spannertest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + "google.golang.org/api/option" + 
"google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" +) + +const ( + spannerDatabaseName = "projects/" + projectID + "/instances/" + instanceID + "/databases/" + databaseName + maxRowsLimit = 1 +) + +func createMetricsMetadata(query string) *metadata.MetricsMetadata { + return createMetricsMetadataFromTimestampColumn(query, "INTERVAL_END") +} + +func createMetricsMetadataFromTimestampColumn(query string, timestampColumn string) *metadata.MetricsMetadata { + labelValueMetadata, _ := metadata.NewLabelValueMetadata("metric_label", "METRIC_LABEL", + metadata.StringValueType) + // Labels + queryLabelValuesMetadata := []metadata.LabelValueMetadata{labelValueMetadata} + + metricDataType := metadata.NewMetricType(pmetric.MetricTypeGauge, pmetric.AggregationTemporalityUnspecified, false) + + metricValueMetadata, _ := metadata.NewMetricValueMetadata("metric_value", "METRIC_VALUE", metricDataType, "unit", + metadata.IntValueType) + // Metrics + queryMetricValuesMetadata := []metadata.MetricValueMetadata{metricValueMetadata} + + return &metadata.MetricsMetadata{ + Name: "test stats", + Query: query, + MetricNamePrefix: "test_stats/", + TimestampColumnName: timestampColumn, + QueryLabelValuesMetadata: queryLabelValuesMetadata, + QueryMetricValuesMetadata: queryMetricValuesMetadata, + } +} + +func createCurrentStatsReaderWithCorruptedMetadata(client *spanner.Client) Reader { //nolint + query := "SELECT * FROM STATS" + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) + + return newCurrentStatsReader(zap.NewNop(), databaseFromClient, + createMetricsMetadataFromTimestampColumn(query, "NOT_EXISTING"), 
ReaderConfig{}) +} + +func createCurrentStatsReader(client *spanner.Client) Reader { //nolint + query := "SELECT * FROM STATS" + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) + + return newCurrentStatsReader(zap.NewNop(), databaseFromClient, createMetricsMetadata(query), ReaderConfig{}) +} + +func createCurrentStatsReaderWithMaxRowsLimit(client *spanner.Client) Reader { //nolint + query := "SELECT * FROM STATS" + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) + config := ReaderConfig{ + TopMetricsQueryMaxRows: maxRowsLimit, + } + + return newCurrentStatsReader(zap.NewNop(), databaseFromClient, createMetricsMetadata(query), config) +} + +func createIntervalStatsReaderWithCorruptedMetadata(client *spanner.Client, backfillEnabled bool) Reader { //nolint + query := "SELECT * FROM STATS WHERE INTERVAL_END = @pullTimestamp" + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) + config := ReaderConfig{ + BackfillEnabled: backfillEnabled, + } + + return newIntervalStatsReader(zap.NewNop(), databaseFromClient, + createMetricsMetadataFromTimestampColumn(query, "NOT_EXISTING"), config) +} + +func createIntervalStatsReader(client *spanner.Client, backfillEnabled bool) Reader { //nolint + query := "SELECT * FROM STATS WHERE INTERVAL_END = @pullTimestamp" + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) + config := ReaderConfig{ + BackfillEnabled: backfillEnabled, + } + + return newIntervalStatsReader(zap.NewNop(), databaseFromClient, createMetricsMetadata(query), config) +} + +func createIntervalStatsReaderWithMaxRowsLimit(client *spanner.Client, backfillEnabled 
bool) Reader { //nolint + query := "SELECT * FROM STATS WHERE INTERVAL_END = @pullTimestamp" + databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) + databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) + config := ReaderConfig{ + TopMetricsQueryMaxRows: maxRowsLimit, + BackfillEnabled: backfillEnabled, + } + + return newIntervalStatsReader(zap.NewNop(), databaseFromClient, createMetricsMetadata(query), config) +} + +func TestStatsReaders_Read(t *testing.T) { + t.Skip("Flaky test - See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/6318") + timestamp := shiftToStartOfMinute(time.Now().UTC()) + ctx := context.Background() + server, err := spannertest.NewServer(":0") + require.NoError(t, err) + defer server.Close() + + conn, err := grpc.NewClient(server.Addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + + databaseAdminClient, err := database.NewDatabaseAdminClient(ctx, option.WithGRPCConn(conn)) + require.NoError(t, err) + defer func(databaseAdminClient *database.DatabaseAdminClient) { + _ = databaseAdminClient.Close() + }(databaseAdminClient) + + op, err := databaseAdminClient.UpdateDatabaseDdl(ctx, &databasepb.UpdateDatabaseDdlRequest{ + Database: databaseName, + Statements: []string{`CREATE TABLE STATS ( + INTERVAL_END TIMESTAMP, + METRIC_LABEL STRING(MAX), + METRIC_VALUE INT64 + ) PRIMARY KEY (METRIC_LABEL) + `}, + }) + require.NoError(t, err) + + err = op.Wait(ctx) + require.NoError(t, err) + + databaseClient, err := spanner.NewClient(ctx, spannerDatabaseName, option.WithGRPCConn(conn)) + require.NoError(t, err) + defer databaseClient.Close() + + _, err = databaseClient.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("STATS", + []string{"INTERVAL_END", "METRIC_LABEL", "METRIC_VALUE"}, + []any{timestamp, "Qwerty", 10}), + spanner.Insert("STATS", + []string{"INTERVAL_END", "METRIC_LABEL", "METRIC_VALUE"}, + []any{timestamp.Add(-1 * time.Minute), 
"Test", 20}), + spanner.Insert("STATS", + []string{"INTERVAL_END", "METRIC_LABEL", "METRIC_VALUE"}, + []any{timestamp.Add(-1 * time.Minute), "Spanner", 30}), + }) + + require.NoError(t, err) + + testCases := map[string]struct { + reader Reader + expectedMetricsAmount int + expectError bool + }{ + "Current stats reader without max rows limit": {createCurrentStatsReader(databaseClient), 3, false}, + "Current stats reader with max rows limit": {createCurrentStatsReaderWithMaxRowsLimit(databaseClient), 1, false}, + "Current stats reader with corrupted metadata": {createCurrentStatsReaderWithCorruptedMetadata(databaseClient), 0, true}, + "Interval stats reader without backfill without max rows limit": {createIntervalStatsReader(databaseClient, false), 1, false}, + "Interval stats reader without backfill with max rows limit": {createIntervalStatsReaderWithMaxRowsLimit(databaseClient, false), 1, false}, + "Interval stats reader with backfill without max rows limit": {createIntervalStatsReader(databaseClient, true), 3, false}, + "Interval stats reader with backfill with max rows limit": {createIntervalStatsReaderWithMaxRowsLimit(databaseClient, true), 2, false}, + "Interval stats reader without backfill with corrupted metadata": {createIntervalStatsReaderWithCorruptedMetadata(databaseClient, false), 0, true}, + "Interval stats reader with backfill with corrupted metadata": {createIntervalStatsReaderWithCorruptedMetadata(databaseClient, true), 0, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + metrics, err := testCase.reader.Read(ctx) + + if testCase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Len(t, metrics, testCase.expectedMetricsAmount) + } + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator.go b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator.go new file mode 100644 index 000000000000..3a7e59718580 --- 
/dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" + +import "time" + +type timestampsGenerator struct { + backfillEnabled bool + difference time.Duration +} + +// This slice will always contain at least one value - now shifted to the start of minute(upper bound). +// In case lastPullTimestamp is greater than now argument slice will contain only one value - now shifted to the start of minute(upper bound). +func (g *timestampsGenerator) pullTimestamps(lastPullTimestamp time.Time, now time.Time) []time.Time { + var timestamps []time.Time + upperBound := shiftToStartOfMinute(now) + + if lastPullTimestamp.IsZero() { + if g.backfillEnabled { + timestamps = pullTimestampsWithDifference(upperBound.Add(-1*backfillIntervalDuration), upperBound, + g.difference) + } else { + timestamps = []time.Time{upperBound} + } + } else { + // lastPullTimestamp is already set to start of minute + timestamps = pullTimestampsWithDifference(lastPullTimestamp, upperBound, g.difference) + } + + return timestamps +} + +// This slice will always contain at least one value(upper bound). +// Difference between each two points is 1 minute. 
+func pullTimestampsWithDifference(lowerBound time.Time, upperBound time.Time, difference time.Duration) []time.Time { + var timestamps []time.Time + + for value := lowerBound.Add(difference); !value.After(upperBound); value = value.Add(difference) { + timestamps = append(timestamps, value) + } + + // To ensure that we did not miss upper bound and timestamps slice will contain at least one value + if len(timestamps) == 0 || timestamps[len(timestamps)-1] != upperBound { + timestamps = append(timestamps, upperBound) + } + + return timestamps +} + +func shiftToStartOfMinute(now time.Time) time.Time { + return time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), 0, 0, now.Location()) +} + +func (g *timestampsGenerator) isBackfillExecution(lastPullTimestamp time.Time) bool { + return lastPullTimestamp.IsZero() && g.backfillEnabled +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go new file mode 100644 index 000000000000..93b666cf31a8 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package statsreader + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestTimestampsGenerator_PullTimestamps(t *testing.T) { + now := time.Date(2021, 9, 17, 16, 25, 30, 0, time.UTC) + nowAtStartOfMinute := shiftToStartOfMinute(now) + backfillIntervalAgo := nowAtStartOfMinute.Add(-1 * backfillIntervalDuration) + backfillIntervalAgoWithSomeSeconds := backfillIntervalAgo.Add(-15 * time.Second) + lastPullTimestampInFuture := nowAtStartOfMinute.Add(backfillIntervalDuration) + + testCases := map[string]struct { + lastPullTimestamp time.Time + backfillEnabled bool + amountOfTimestamps int + }{ + "Zero last pull timestamp without backfill": 
{time.Time{}, false, 1}, + "Zero last pull timestamp with backfill": {time.Time{}, true, int(backfillIntervalDuration.Minutes())}, + "Last pull timestamp now at start of minute backfill does not matter": {nowAtStartOfMinute, false, 1}, + "Last pull timestamp back fill interval ago of minute backfill does not matter": {backfillIntervalAgo, false, int(backfillIntervalDuration.Minutes())}, + "Last pull timestamp back fill interval ago with some seconds of minute backfill does not matter": {backfillIntervalAgoWithSomeSeconds, false, int(backfillIntervalDuration.Minutes()) + 1}, + "Last pull timestamp greater than now without backfill": {lastPullTimestampInFuture, false, 1}, + "Last pull timestamp greater than now with backfill": {lastPullTimestampInFuture, true, 1}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + generator := ×tampsGenerator{ + backfillEnabled: testCase.backfillEnabled, + difference: time.Minute, + } + timestamps := generator.pullTimestamps(testCase.lastPullTimestamp, now) + + assert.Len(t, timestamps, testCase.amountOfTimestamps) + }) + } +} + +func TestPullTimestampsWithDifference(t *testing.T) { + expectedAmountOfTimestamps := 5 + lowerBound := time.Date(2021, 9, 17, 16, 25, 0, 0, time.UTC) + upperBound := lowerBound.Add(time.Duration(expectedAmountOfTimestamps) * time.Minute) + + timestamps := pullTimestampsWithDifference(lowerBound, upperBound, time.Minute) + + assert.Len(t, timestamps, expectedAmountOfTimestamps) + + expectedTimestamp := lowerBound.Add(time.Minute) + + for _, timestamp := range timestamps { + assert.Equal(t, expectedTimestamp, timestamp) + expectedTimestamp = expectedTimestamp.Add(time.Minute) + } + + // Check edge case: ensure that we didn't miss upperBound + upperBound = lowerBound.Add(5 * time.Minute).Add(15 * time.Second) + timestamps = pullTimestampsWithDifference(lowerBound, upperBound, time.Minute) + + assert.Len(t, timestamps, 6) + + expectedTimestamp = lowerBound.Add(time.Minute) + + 
for i := 0; i < expectedAmountOfTimestamps; i++ { + assert.Equal(t, expectedTimestamp, timestamps[i]) + expectedTimestamp = expectedTimestamp.Add(time.Minute) + } + + assert.Equal(t, upperBound, timestamps[expectedAmountOfTimestamps]) + +} + +func TestShiftToStartOfMinute(t *testing.T) { + now := time.Now().UTC() + actual := shiftToStartOfMinute(now) + + assert.Equal(t, 0, actual.Second()) + assert.Equal(t, 0, actual.Nanosecond()) +} + +func TestTimestampsGenerator_IsBackfillExecution(t *testing.T) { + testCases := map[string]struct { + lastPullTimestamp time.Time + backfillEnabled bool + expectedResult bool + }{ + "Zero last pull timestamp with backfill": {time.Time{}, true, true}, + "Non-zero last pull timestamp with backfill": {time.Now().UTC(), true, false}, + "Zero last pull timestamp with no backfill": {time.Time{}, false, false}, + "Non-zero last pull timestamp with no backfill": {time.Now().UTC(), false, false}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + generator := ×tampsGenerator{ + backfillEnabled: testCase.backfillEnabled, + } + + assert.Equal(t, testCase.expectedResult, generator.isBackfillExecution(testCase.lastPullTimestamp)) + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/metadata.yaml b/receiver/googlecloudspannerreceiver/metadata.yaml new file mode 100644 index 000000000000..f24a89cf0df2 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/metadata.yaml @@ -0,0 +1,14 @@ +type: googlecloudspanner + +status: + class: receiver + stability: + beta: [metrics] + distributions: [contrib] + codeowners: + active: [dashpole, dsimil, KiranmayiB, harishbohara11] + emeritus: [architjugran, varunraiko] +tests: + config: + goleak: + skip: true \ No newline at end of file diff --git a/receiver/googlecloudspannerreceiver/receiver.go b/receiver/googlecloudspannerreceiver/receiver.go new file mode 100644 index 000000000000..ef2397c3adfa --- /dev/null +++ b/receiver/googlecloudspannerreceiver/receiver.go @@ 
-0,0 +1,196 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlecloudspannerreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver" + +import ( + "context" + _ "embed" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/scrapererror" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filterfactory" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadataparser" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" +) + +//go:embed "internal/metadataconfig/metrics.yaml" +var metadataYaml []byte + +var _ receiver.Metrics = (*googleCloudSpannerReceiver)(nil) + +type googleCloudSpannerReceiver struct { + logger *zap.Logger + config *Config + cancel context.CancelFunc + projectReaders []statsreader.CompositeReader + metricsBuilder metadata.MetricsBuilder +} + +func newGoogleCloudSpannerReceiver(logger *zap.Logger, config *Config) *googleCloudSpannerReceiver { + return &googleCloudSpannerReceiver{ + logger: logger, + config: config, + } +} + +func (r *googleCloudSpannerReceiver) Scrape(ctx context.Context) (pmetric.Metrics, error) { + var ( + allMetricsDataPoints []*metadata.MetricsDataPoint + err error + ) + + for _, projectReader := range r.projectReaders { + dataPoints, readErr := projectReader.Read(ctx) + allMetricsDataPoints = 
append(allMetricsDataPoints, dataPoints...) + if readErr != nil { + err = multierr.Append(err, readErr) + } + } + + metrics, buildErr := r.metricsBuilder.Build(allMetricsDataPoints) + if buildErr != nil { + err = multierr.Append(err, buildErr) + } + + if err != nil && metrics.DataPointCount() > 0 { + err = scrapererror.NewPartialScrapeError(err, len(multierr.Errors(err))) + } + return metrics, err +} + +func (r *googleCloudSpannerReceiver) Start(ctx context.Context, _ component.Host) error { + ctx, r.cancel = context.WithCancel(ctx) + err := r.initialize(ctx) + if err != nil { + return err + } + + return nil +} + +func (r *googleCloudSpannerReceiver) Shutdown(context.Context) error { + for _, projectReader := range r.projectReaders { + projectReader.Shutdown() + } + + if r.metricsBuilder == nil { + return nil + } + err := r.metricsBuilder.Shutdown() + if err != nil { + return err + } + + r.cancel() + + return nil +} + +func (r *googleCloudSpannerReceiver) initialize(ctx context.Context) error { + parsedMetadata, err := metadataparser.ParseMetadataConfig(metadataYaml) + if err != nil { + return fmt.Errorf("error occurred during parsing of metadata: %w", err) + } + + err = r.initializeProjectReaders(ctx, parsedMetadata) + if err != nil { + return err + } + + return r.initializeMetricsBuilder(parsedMetadata) +} + +func (r *googleCloudSpannerReceiver) initializeProjectReaders(ctx context.Context, + parsedMetadata []*metadata.MetricsMetadata) error { + + readerConfig := statsreader.ReaderConfig{ + BackfillEnabled: r.config.BackfillEnabled, + TopMetricsQueryMaxRows: r.config.TopMetricsQueryMaxRows, + HideTopnLockstatsRowrangestartkey: r.config.HideTopnLockstatsRowrangestartkey, + TruncateText: r.config.TruncateText, + } + + for _, project := range r.config.Projects { + projectReader, err := newProjectReader(ctx, r.logger, project, parsedMetadata, readerConfig) + if err != nil { + return err + } + + r.projectReaders = append(r.projectReaders, projectReader) + } + + return 
nil +} + +func (r *googleCloudSpannerReceiver) initializeMetricsBuilder(parsedMetadata []*metadata.MetricsMetadata) error { + r.logger.Debug("Constructing metrics builder") + + projectAmount := len(r.config.Projects) + instanceAmount := 0 + databaseAmount := 0 + + for _, project := range r.config.Projects { + instanceAmount += len(project.Instances) + + for _, instance := range project.Instances { + databaseAmount += len(instance.Databases) + } + } + + factoryConfig := &filterfactory.ItemFilterFactoryConfig{ + MetadataItems: parsedMetadata, + TotalLimit: r.config.CardinalityTotalLimit, + ProjectAmount: projectAmount, + InstanceAmount: instanceAmount, + DatabaseAmount: databaseAmount, + } + itemFilterResolver, err := filterfactory.NewItemFilterResolver(r.logger, factoryConfig) + if err != nil { + return err + } + + r.metricsBuilder = metadata.NewMetricsFromDataPointBuilder(itemFilterResolver) + + return nil +} + +func newProjectReader(ctx context.Context, logger *zap.Logger, project Project, parsedMetadata []*metadata.MetricsMetadata, + readerConfig statsreader.ReaderConfig) (*statsreader.ProjectReader, error) { + logger.Debug("Constructing project reader for project", zap.String("project id", project.ID)) + + databaseReadersCount := 0 + for _, instance := range project.Instances { + databaseReadersCount += len(instance.Databases) + } + + databaseReaders := make([]statsreader.CompositeReader, databaseReadersCount) + databaseReaderIndex := 0 + for _, instance := range project.Instances { + for _, database := range instance.Databases { + logger.Debug("Constructing database reader for combination of project, instance, database", + zap.String("project id", project.ID), zap.String("instance id", instance.ID), zap.String("database", database)) + + databaseID := datasource.NewDatabaseID(project.ID, instance.ID, database) + + databaseReader, err := statsreader.NewDatabaseReader(ctx, parsedMetadata, databaseID, + project.ServiceAccountKey, readerConfig, logger) + if err != 
nil { + return nil, err + } + + databaseReaders[databaseReaderIndex] = databaseReader + databaseReaderIndex++ + } + } + + return statsreader.NewProjectReader(databaseReaders, logger), nil +} diff --git a/receiver/googlecloudspannerreceiver/receiver_test.go b/receiver/googlecloudspannerreceiver/receiver_test.go new file mode 100644 index 000000000000..ca7cad7b8d7e --- /dev/null +++ b/receiver/googlecloudspannerreceiver/receiver_test.go @@ -0,0 +1,322 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package googlecloudspannerreceiver + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap/zaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/statsreader" +) + +const ( + serviceAccountValidPath = "testdata/serviceAccount.json" + serviceAccountInvalidPath = "does not exist" +) + +type mockCompositeReader struct { + mock.Mock +} + +func (r *mockCompositeReader) Name() string { + return "mockCompositeReader" +} + +func (r *mockCompositeReader) Read(ctx context.Context) ([]*metadata.MetricsDataPoint, error) { + args := r.Called(ctx) + return args.Get(0).([]*metadata.MetricsDataPoint), args.Error(1) +} + +func (r *mockCompositeReader) Shutdown() { + // Do nothing +} + +type metricsBuilder struct { + throwErrorOnShutdown bool +} + +func newMetricsBuilder(throwErrorOnShutdown bool) metadata.MetricsBuilder { + return &metricsBuilder{ + throwErrorOnShutdown: throwErrorOnShutdown, + } +} + +func (b *metricsBuilder) Build([]*metadata.MetricsDataPoint) (pmetric.Metrics, error) { + return 
pmetric.NewMetrics(), nil +} + +func (b *metricsBuilder) Shutdown() error { + if b.throwErrorOnShutdown { + return errors.New("error on shutdown") + } + + return nil +} + +func TestNewGoogleCloudSpannerReceiver(t *testing.T) { + logger := zaptest.NewLogger(t) + cfg := createDefaultConfig().(*Config) + receiver := newGoogleCloudSpannerReceiver(logger, cfg) + + require.NotNil(t, receiver) + + assert.Equal(t, logger, receiver.logger) + assert.Equal(t, cfg, receiver.config) +} + +func createConfig(serviceAccountPath string) *Config { + cfg := createDefaultConfig().(*Config) + + instance := Instance{ + ID: "instanceID", + Databases: []string{"databaseName"}, + } + + project := Project{ + ID: "projectID", + Instances: []Instance{instance}, + ServiceAccountKey: serviceAccountPath, + } + + cfg.Projects = []Project{project} + + return cfg +} + +func TestStart(t *testing.T) { + testCases := map[string]struct { + serviceAccountPath string + expectError bool + }{ + "Happy path": {serviceAccountValidPath, false}, + "With project readers initialization error": {serviceAccountInvalidPath, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + logger := zaptest.NewLogger(t) + cfg := createConfig(testCase.serviceAccountPath) + host := componenttest.NewNopHost() + + receiver := newGoogleCloudSpannerReceiver(logger, cfg) + + require.NotNil(t, receiver) + + err := receiver.Start(context.Background(), host) + + if testCase.expectError { + require.Error(t, err) + assert.Empty(t, receiver.projectReaders) + } else { + require.NoError(t, err) + assert.Len(t, receiver.projectReaders, 1) + } + }) + } +} + +func TestInitialize(t *testing.T) { + testCases := map[string]struct { + serviceAccountPath string + expectError bool + replaceMetadataConfig bool + }{ + "Happy path": {serviceAccountValidPath, false, false}, + "With error": {serviceAccountInvalidPath, true, false}, + "With metadata config error": {serviceAccountInvalidPath, true, true}, + } + + for 
name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + logger := zaptest.NewLogger(t) + cfg := createConfig(testCase.serviceAccountPath) + + receiver := newGoogleCloudSpannerReceiver(logger, cfg) + + require.NotNil(t, receiver) + + yaml := metadataYaml + + if testCase.replaceMetadataConfig { + metadataYaml = []byte{1} + } + + err := receiver.initialize(context.Background()) + + if testCase.replaceMetadataConfig { + metadataYaml = yaml + } + + if testCase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestInitializeProjectReaders(t *testing.T) { + testCases := map[string]struct { + serviceAccountPath string + expectError bool + }{ + "Happy path": {serviceAccountValidPath, false}, + "With error": {serviceAccountInvalidPath, true}, + "With metadata config error": {serviceAccountInvalidPath, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + logger := zaptest.NewLogger(t) + cfg := createConfig(testCase.serviceAccountPath) + + receiver := newGoogleCloudSpannerReceiver(logger, cfg) + + require.NotNil(t, receiver) + + err := receiver.initializeProjectReaders(context.Background(), []*metadata.MetricsMetadata{}) + + if testCase.expectError { + require.Error(t, err) + assert.Empty(t, receiver.projectReaders) + } else { + require.NoError(t, err) + assert.Len(t, receiver.projectReaders, 1) + } + }) + } +} + +func TestInitializeMetricsBuilder(t *testing.T) { + logger := zaptest.NewLogger(t) + cfg := createConfig(serviceAccountValidPath) + testCases := map[string]struct { + metadataItems []*metadata.MetricsMetadata + expectError bool + }{ + "Happy path": {[]*metadata.MetricsMetadata{{}}, false}, + "With error": {[]*metadata.MetricsMetadata{}, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + receiver := newGoogleCloudSpannerReceiver(logger, cfg) + + require.NotNil(t, receiver) + + err := 
receiver.initializeMetricsBuilder(testCase.metadataItems) + + if testCase.expectError { + require.Error(t, err) + require.Nil(t, receiver.metricsBuilder) + } else { + require.NoError(t, err) + require.NotNil(t, receiver.metricsBuilder) + } + }) + } +} + +func TestNewProjectReader(t *testing.T) { + testCases := map[string]struct { + serviceAccountPath string + expectError bool + }{ + "Happy path": {serviceAccountValidPath, false}, + "With error": {serviceAccountInvalidPath, true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + logger := zaptest.NewLogger(t) + cfg := createConfig(testCase.serviceAccountPath) + var parsedMetadata []*metadata.MetricsMetadata + + reader, err := newProjectReader(context.Background(), logger, cfg.Projects[0], parsedMetadata, + statsreader.ReaderConfig{}) + + if testCase.expectError { + require.Error(t, err) + assert.Nil(t, reader) + } else { + require.NoError(t, err) + assert.NotNil(t, reader) + } + }) + } +} + +func TestScrape(t *testing.T) { + logger := zaptest.NewLogger(t) + testCases := map[string]struct { + metricsBuilder metadata.MetricsBuilder + expectedError error + }{ + "Happy path": {newMetricsBuilder(false), nil}, + "With error": {newMetricsBuilder(true), errors.New("error")}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + cfg := createDefaultConfig().(*Config) + receiver := newGoogleCloudSpannerReceiver(logger, cfg) + mcr := &mockCompositeReader{} + + require.NotNil(t, receiver) + + receiver.projectReaders = []statsreader.CompositeReader{mcr} + receiver.metricsBuilder = testCase.metricsBuilder + ctx := context.Background() + + mcr.On("Read", ctx).Return([]*metadata.MetricsDataPoint{}, testCase.expectedError) + + _, err := receiver.Scrape(ctx) + + mcr.AssertExpectations(t) + assert.Equal(t, testCase.expectedError, err) + }) + } +} + +func TestGoogleCloudSpannerReceiver_Shutdown(t *testing.T) { + logger := zaptest.NewLogger(t) + projectReader := 
statsreader.NewProjectReader([]statsreader.CompositeReader{}, logger) + + testCases := map[string]struct { + metricsBuilder metadata.MetricsBuilder + expectError bool + }{ + "Happy path": {newMetricsBuilder(false), false}, + "With error": {newMetricsBuilder(true), true}, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + receiver := &googleCloudSpannerReceiver{ + projectReaders: []statsreader.CompositeReader{projectReader}, + metricsBuilder: testCase.metricsBuilder, + } + ctx := context.Background() + ctx, receiver.cancel = context.WithCancel(ctx) + + err := receiver.Shutdown(ctx) + + if testCase.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/receiver/googlecloudspannerreceiver/testdata/config.yaml b/receiver/googlecloudspannerreceiver/testdata/config.yaml new file mode 100644 index 000000000000..64680f806e74 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/testdata/config.yaml @@ -0,0 +1,30 @@ +googlecloudspanner: + collection_interval: 120s + top_metrics_query_max_rows: 10 + backfill_enabled: true + cardinality_total_limit: 200000 + hide_topn_lockstats_rowrangestartkey: true + truncate_text: true + projects: + - project_id: "spanner project 1" + service_account_key: "path to spanner project 1 service account json key" + instances: + - instance_id: "id1" + databases: + - "db11" + - "db12" + - instance_id: "id2" + databases: + - "db21" + - "db22" + - project_id: "spanner project 2" + service_account_key: "path to spanner project 2 service account json key" + instances: + - instance_id: "id3" + databases: + - "db31" + - "db32" + - instance_id: "id4" + databases: + - "db41" + - "db42" diff --git a/receiver/googlecloudspannerreceiver/testdata/metadata_invalid.yaml b/receiver/googlecloudspannerreceiver/testdata/metadata_invalid.yaml new file mode 100644 index 000000000000..2ffde7d593f0 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/testdata/metadata_invalid.yaml 
@@ -0,0 +1,15 @@ +metadata: + - name: "invalid" + query: "query" + metric_name_prefix: "metric_name_prefix" + labels: + - name: "label_name" + column_name: "LABEL_NAME" + value_type: "invalid" + metrics: + - name: "metric_name" + column_name: "METRIC_NAME" + value_type: "int" + data: + type: "invalid" + unit: "metricUnit" \ No newline at end of file diff --git a/receiver/googlecloudspannerreceiver/testdata/metadata_not_yaml.yaml b/receiver/googlecloudspannerreceiver/testdata/metadata_not_yaml.yaml new file mode 100644 index 000000000000..f85e3a96a1fb --- /dev/null +++ b/receiver/googlecloudspannerreceiver/testdata/metadata_not_yaml.yaml @@ -0,0 +1 @@ +notyaml \ No newline at end of file diff --git a/receiver/googlecloudspannerreceiver/testdata/metadata_valid.yaml b/receiver/googlecloudspannerreceiver/testdata/metadata_valid.yaml new file mode 100644 index 000000000000..a9b25e894246 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/testdata/metadata_valid.yaml @@ -0,0 +1,38 @@ +metadata: +# +# -------------------------------------------- Current Stats ----------------------------------------------------------- +# + - name: "current stats" + query: "query" + metric_name_prefix: "metric_name_prefix" + high_cardinality: false + labels: + - name: "label_name" + column_name: "LABEL_NAME" + value_type: "string" + metrics: + - name: "metric_name" + column_name: "METRIC_NAME" + value_type: "int" + data: + type: "gauge" + unit: "metric_unit" +# +# -------------------------------------------- Interval Stats ---------------------------------------------------------- +# + - name: "interval stats" + query: "query" + metric_name_prefix: "metric_name_prefix" + timestamp_column_name: "timestamp_column_name" + high_cardinality: true + labels: + - name: "label_name" + column_name: "LABEL_NAME" + value_type: "string" + metrics: + - name: "metric_name" + column_name: "METRIC_NAME" + value_type: "int" + data: + type: "gauge" + unit: "metric_unit" \ No newline at end of file diff 
--git a/receiver/googlecloudspannerreceiver/testdata/serviceAccount.json b/receiver/googlecloudspannerreceiver/testdata/serviceAccount.json new file mode 100644 index 000000000000..1e9a93e54193 --- /dev/null +++ b/receiver/googlecloudspannerreceiver/testdata/serviceAccount.json @@ -0,0 +1,12 @@ +{ + "type": "service_account", + "project_id": "qwerty", + "private_key_id": "qwerty", + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDDKusrw23c7AsN\nQCxprTTyywmH1L0f5kJhqVrG+5KxlAO5ivX3NG4XimkzaI5zz3ROb9X2zNNIJ//n\nudEL4ke8oM87RPPAgyAQ7hIevFew2r8FHBToQki7Pl6+UcmAU67Lsqlv4UVNBbzv\nai89POKurAp5S6qTo3tV9OilnMiBirIIbx1l103BLG+7ZgDEkrxJZ/ysF+xSy/AC\n/Rs1YTIp8kyxLisHJq8SShKxLtywuOeJerVAJD0s1pVVORAOK6OJVCB23SDzMZZM\no1rIrrd5NkqNtP9YniceDUSk5s1ePC6UWZdMUpIXuW396+EuyaqMT2j1J7i4agce\nPgwrXbtVAgMBAAECggEAWkJuEoZGN+a+Ubl9EL52/1ZhKP19HdSanJn7Do1oUVH3\nywZ2LIaF58MuVgUj5HFsA557ILGngPZmxl9MS6exr43baTjBViYOXWYM4UQPdg4k\nS5OqI6QMCPSWUoR2oS+SZkhRThmruCxQLZU5FaQj3c5Y5bHVyIPR1XX3Zig1Hpsb\nQHwYC/Nkl60u2GPJfutoyZiBuYQVkwIEWTDBbPbrR2amw6YR2HhOOzXlgM9OmC12\nE11q1IQE6r7vc4BK4jrRT7zgxpjeshLjI82jxzGM7R1ZidDD9LnO2s9oBkwE1bJ2\nJvTkzGHPaEYIEie8VL8MePxmDXSLEospu6NEOUwsvQKBgQD4pY9J693JBvVu/5xX\ndasI0XYpp77Yx8L8O8xxySTxPbZGIzPkZ5yGlMefvIYKkB5eSCeWZhVvbL/Jdbkt\ni8tVVtTxUGRgqeHlkGbRDXTdvGKo2qlGsXObIaux1ISop2vEfq21VabIh66YIRFL\nhiGnmnORgLJI5YBgOWfCdsyF5wKBgQDI8HuQRuvbLNEPQ81coFzC5DljK6ACBk3j\nY/QFNiRh2Ao1LQPd4lLWUeiTfCpmrWul/bdPLqqCHYpiAMNtSV/YQ8sWSeg03PlB\n2JE9zNnH6QP9Zl4MtxkVVb/nUEtYe+6lJXc2oJbb7sf60FixV4JWyGWTYPFSvg6E\nkdW9dSYVYwKBgChJ0zTcFfyrtvr8Sd3WABeWsPnA52iCvbJXEiJhwC2inTUyIQdi\nTnd0BgB48JMnlPQb5uY0tkZurYi8HXwmyZSTVD+hkhIjlKm4wyAeeRAwpR/NBl6h\nBCVfyE0xLSmSryCQkh7uuO6HJaAaw42kNzHMEevCmaC8JxfwVUKqMyuDAoGBAMIU\npQRhvAVicDrSPlSs/2uujE9hH5dVB34OWO9/r/xhctqRtB5oL6KaUo3BbCLodgVJ\n3fg5Fq++YJ7wJdI1AMIeGNZaZFJK7OXQi9ipN+CDDDuA2G5nm26j+EsvntbEPWh9\nm3vD+HJNXBLBkikYYOf5f0Kua+iDcfpR8aSgtjwJAoGBAOQLi1i7crOEvj+g5k23\nOdCXrFapkmzyW+HHPXLm2Sz/4UGJjGfUKAEelfLFFJBs9FLRHbw
SndvRjcf/oXcl\nLT+4/eBI8eOQRAbRNg86BGTn+iTLPF2qFZqTQQtq0BHOXO/7jOI4H36i1uTcvf6+\n4w0afAT+jEpv2LBZWNT2XYdK\n-----END PRIVATE KEY-----\n", + "client_email": "qwerty@qwerty.iam.gserviceaccount.com", + "client_id": "123", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/qwerty%40qwerty.iam.gserviceaccount.com" +} diff --git a/reports/distributions/contrib.yaml b/reports/distributions/contrib.yaml index da72e17fade2..0171a6edd111 100644 --- a/reports/distributions/contrib.yaml +++ b/reports/distributions/contrib.yaml @@ -131,6 +131,7 @@ components: - fluentforward - github - googlecloudmonitoring + - googlecloudspanner - haproxy - hostmetrics - httpcheck diff --git a/versions.yaml b/versions.yaml index 922243793e69..075a66c68ca2 100644 --- a/versions.yaml +++ b/versions.yaml @@ -220,6 +220,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/githubreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudmonitoringreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/haproxyreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/httpcheckreceiver